Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/__init__.py +14 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py +743 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py +64 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py +317 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py +406 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_plot/partial_dependence.py +1473 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py +609 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py +1140 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py +958 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py +47 -0
- venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py +542 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__init__.py +180 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_base.py +199 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_classification.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.pxd +272 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__init__.py +112 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/_dispatcher.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd +57 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin_classmode.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.pxd +265 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_classmode.pxd +5 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd +106 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py +764 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd +396 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd +150 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors_classmode.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_fast.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/confusion_matrix.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f7c5f90dcc736b10c9a860fee444b18b8a80b0c0e1070f7d4664b26c43b687d
+size 50332843
ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bee4537af3d84a16470ecce9a9f4fd1fe0ac6bc730c97111f07370d4f811d2b1
+size 33555612
ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39bf520e527c0e2a618541079856e71dd96119a642808df2dd28f35172dcdd4f
+size 33555627
ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df0904ae0d573f831a319d36df66437abfde9e930925b37b49cc55f33c9121fb
+size 9372
ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:848b426a17e8a254353595bfa2925cc39bc4e6ac59d7b00e636a81177b08d1bd
+size 9387
ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9446fed7682f41a6b4b537b5730b03286a34116c71ecf4b2f26b6fcdb4d901fb
+size 9293
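
The six `.pt` entries above are Git LFS pointers: the diff records only the LFS spec line, the content hash, and the byte size, while the actual optimizer-state tensors (Adam `exp_avg`/`exp_avg_sq` moments and an fp32 master copy for `7.post_attention_layernorm.weight`) live in LFS storage. A minimal sketch of inspecting one shard after `git lfs pull`; whether each file holds a bare tensor or a small dict is an assumption, not something this diff shows:

# Sketch: inspect one universal-checkpoint shard (assumes `git lfs pull` already
# replaced the pointer file with the real payload and that PyTorch is installed).
import torch

path = "ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/fp32.pt"
obj = torch.load(path, map_location="cpu")

if torch.is_tensor(obj):
    print(obj.shape, obj.dtype)  # e.g. the fp32 master copy of the layer-norm weight
else:
    print(type(obj), obj)        # fall back: the writer may have stored a small dict instead
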
venv/lib/python3.10/site-packages/sklearn/inspection/__init__.py
ADDED
@@ -0,0 +1,14 @@
+"""The :mod:`sklearn.inspection` module includes tools for model inspection."""
+
+
+from ._partial_dependence import partial_dependence
+from ._permutation_importance import permutation_importance
+from ._plot.decision_boundary import DecisionBoundaryDisplay
+from ._plot.partial_dependence import PartialDependenceDisplay
+
+__all__ = [
+    "partial_dependence",
+    "permutation_importance",
+    "PartialDependenceDisplay",
+    "DecisionBoundaryDisplay",
+]
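
The four re-exports above are the whole public surface of `sklearn.inspection`. As a quick check that the module wires up as listed, here is a hedged sketch using the plotting entry point `DecisionBoundaryDisplay.from_estimator` (its implementation sits in `_plot/decision_boundary.py`, which is listed in this diff but not shown); it assumes matplotlib is available in the same venv:

# Sketch: exercise the public names re-exported by sklearn/inspection/__init__.py.
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import DecisionBoundaryDisplay

X, y = make_moons(noise=0.3, random_state=0)
clf = RandomForestClassifier(random_state=0).fit(X, y)

# Draw the fitted model's decision regions, then overlay the training points.
disp = DecisionBoundaryDisplay.from_estimator(clf, X, response_method="predict", alpha=0.5)
disp.ax_.scatter(X[:, 0], X[:, 1], c=y, edgecolor="k")
plt.show()
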
venv/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (593 Bytes)
venv/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py
ADDED
@@ -0,0 +1,743 @@
+"""Partial dependence plots for regression and classification models."""
+
+# Authors: Peter Prettenhofer
+#          Trevor Stephens
+#          Nicolas Hug
+# License: BSD 3 clause
+
+from collections.abc import Iterable
+
+import numpy as np
+from scipy import sparse
+from scipy.stats.mstats import mquantiles
+
+from ..base import is_classifier, is_regressor
+from ..ensemble import RandomForestRegressor
+from ..ensemble._gb import BaseGradientBoosting
+from ..ensemble._hist_gradient_boosting.gradient_boosting import (
+    BaseHistGradientBoosting,
+)
+from ..exceptions import NotFittedError
+from ..tree import DecisionTreeRegressor
+from ..utils import (
+    Bunch,
+    _determine_key_type,
+    _get_column_indices,
+    _safe_assign,
+    _safe_indexing,
+    check_array,
+    check_matplotlib_support,  # noqa
+)
+from ..utils._param_validation import (
+    HasMethods,
+    Integral,
+    Interval,
+    StrOptions,
+    validate_params,
+)
+from ..utils.extmath import cartesian
+from ..utils.validation import _check_sample_weight, check_is_fitted
+from ._pd_utils import _check_feature_names, _get_feature_index
+
+__all__ = [
+    "partial_dependence",
+]
+
+
+def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
+    """Generate a grid of points based on the percentiles of X.
+
+    The grid is a cartesian product between the columns of ``values``. The
+    ith column of ``values`` consists in ``grid_resolution`` equally-spaced
+    points between the percentiles of the jth column of X.
+
+    If ``grid_resolution`` is bigger than the number of unique values in the
+    j-th column of X or if the feature is a categorical feature (by inspecting
+    `is_categorical`) , then those unique values will be used instead.
+
+    Parameters
+    ----------
+    X : array-like of shape (n_samples, n_target_features)
+        The data.
+
+    percentiles : tuple of float
+        The percentiles which are used to construct the extreme values of
+        the grid. Must be in [0, 1].
+
+    is_categorical : list of bool
+        For each feature, tells whether it is categorical or not. If a feature
+        is categorical, then the values used will be the unique ones
+        (i.e. categories) instead of the percentiles.
+
+    grid_resolution : int
+        The number of equally spaced points to be placed on the grid for each
+        feature.
+
+    Returns
+    -------
+    grid : ndarray of shape (n_points, n_target_features)
+        A value for each feature at each point in the grid. ``n_points`` is
+        always ``<= grid_resolution ** X.shape[1]``.
+
+    values : list of 1d ndarrays
+        The values with which the grid has been created. The size of each
+        array ``values[j]`` is either ``grid_resolution``, or the number of
+        unique values in ``X[:, j]``, whichever is smaller.
+    """
+    if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
+        raise ValueError("'percentiles' must be a sequence of 2 elements.")
+    if not all(0 <= x <= 1 for x in percentiles):
+        raise ValueError("'percentiles' values must be in [0, 1].")
+    if percentiles[0] >= percentiles[1]:
+        raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
+
+    if grid_resolution <= 1:
+        raise ValueError("'grid_resolution' must be strictly greater than 1.")
+
+    values = []
+    # TODO: we should handle missing values (i.e. `np.nan`) specifically and store them
+    # in a different Bunch attribute.
+    for feature, is_cat in enumerate(is_categorical):
+        try:
+            uniques = np.unique(_safe_indexing(X, feature, axis=1))
+        except TypeError as exc:
+            # `np.unique` will fail in the presence of `np.nan` and `str` categories
+            # due to sorting. Temporary, we reraise an error explaining the problem.
+            raise ValueError(
+                f"The column #{feature} contains mixed data types. Finding unique "
+                "categories fail due to sorting. It usually means that the column "
+                "contains `np.nan` values together with `str` categories. Such use "
+                "case is not yet supported in scikit-learn."
+            ) from exc
+        if is_cat or uniques.shape[0] < grid_resolution:
+            # Use the unique values either because:
+            # - feature has low resolution use unique values
+            # - feature is categorical
+            axis = uniques
+        else:
+            # create axis based on percentiles and grid resolution
+            emp_percentiles = mquantiles(
+                _safe_indexing(X, feature, axis=1), prob=percentiles, axis=0
+            )
+            if np.allclose(emp_percentiles[0], emp_percentiles[1]):
+                raise ValueError(
+                    "percentiles are too close to each other, "
+                    "unable to build the grid. Please choose percentiles "
+                    "that are further apart."
+                )
+            axis = np.linspace(
+                emp_percentiles[0],
+                emp_percentiles[1],
+                num=grid_resolution,
+                endpoint=True,
+            )
+        values.append(axis)
+
+    return cartesian(values), values
+
+
+def _partial_dependence_recursion(est, grid, features):
+    """Calculate partial dependence via the recursion method.
+
+    The recursion method is in particular enabled for tree-based estimators.
+
+    For each `grid` value, a weighted tree traversal is performed: if a split node
+    involves an input feature of interest, the corresponding left or right branch
+    is followed; otherwise both branches are followed, each branch being weighted
+    by the fraction of training samples that entered that branch. Finally, the
+    partial dependence is given by a weighted average of all the visited leaves
+    values.
+
+    This method is more efficient in terms of speed than the `'brute'` method
+    (:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`).
+    However, here, the partial dependence computation is done explicitly with the
+    `X` used during training of `est`.
+
+    Parameters
+    ----------
+    est : BaseEstimator
+        A fitted estimator object implementing :term:`predict` or
+        :term:`decision_function`. Multioutput-multiclass classifiers are not
+        supported. Note that `'recursion'` is only supported for some tree-based
+        estimators (namely
+        :class:`~sklearn.ensemble.GradientBoostingClassifier`,
+        :class:`~sklearn.ensemble.GradientBoostingRegressor`,
+        :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
+        :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
+        :class:`~sklearn.tree.DecisionTreeRegressor`,
+        :class:`~sklearn.ensemble.RandomForestRegressor`,
+        ).
+
+    grid : array-like of shape (n_points, n_target_features)
+        The grid of feature values for which the partial dependence is calculated.
+        Note that `n_points` is the number of points in the grid and `n_target_features`
+        is the number of features you are doing partial dependence at.
+
+    features : array-like of {int, str}
+        The feature (e.g. `[0]`) or pair of interacting features
+        (e.g. `[(0, 1)]`) for which the partial dependency should be computed.
+
+    Returns
+    -------
+    averaged_predictions : array-like of shape (n_targets, n_points)
+        The averaged predictions for the given `grid` of features values.
+        Note that `n_targets` is the number of targets (e.g. 1 for binary
+        classification, `n_tasks` for multi-output regression, and `n_classes` for
+        multiclass classification) and `n_points` is the number of points in the `grid`.
+    """
+    averaged_predictions = est._compute_partial_dependence_recursion(grid, features)
+    if averaged_predictions.ndim == 1:
+        # reshape to (1, n_points) for consistency with
+        # _partial_dependence_brute
+        averaged_predictions = averaged_predictions.reshape(1, -1)
+
+    return averaged_predictions
+
+
+def _partial_dependence_brute(
+    est, grid, features, X, response_method, sample_weight=None
+):
+    """Calculate partial dependence via the brute force method.
+
+    The brute method explicitly averages the predictions of an estimator over a
+    grid of feature values.
+
+    For each `grid` value, all the samples from `X` have their variables of
+    interest replaced by that specific `grid` value. The predictions are then made
+    and averaged across the samples.
+
+    This method is slower than the `'recursion'`
+    (:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
+    version for estimators with this second option. However, with the `'brute'`
+    force method, the average will be done with the given `X` and not the `X`
+    used during training, as it is done in the `'recursion'` version. Therefore
+    the average can always accept `sample_weight` (even when the estimator was
+    fitted without).
+
+    Parameters
+    ----------
+    est : BaseEstimator
+        A fitted estimator object implementing :term:`predict`,
+        :term:`predict_proba`, or :term:`decision_function`.
+        Multioutput-multiclass classifiers are not supported.
+
+    grid : array-like of shape (n_points, n_target_features)
+        The grid of feature values for which the partial dependence is calculated.
+        Note that `n_points` is the number of points in the grid and `n_target_features`
+        is the number of features you are doing partial dependence at.
+
+    features : array-like of {int, str}
+        The feature (e.g. `[0]`) or pair of interacting features
+        (e.g. `[(0, 1)]`) for which the partial dependency should be computed.
+
+    X : array-like of shape (n_samples, n_features)
+        `X` is used to generate values for the complement features. That is, for
+        each value in `grid`, the method will average the prediction of each
+        sample from `X` having that grid value for `features`.
+
+    response_method : {'auto', 'predict_proba', 'decision_function'}, \
+            default='auto'
+        Specifies whether to use :term:`predict_proba` or
+        :term:`decision_function` as the target response. For regressors
+        this parameter is ignored and the response is always the output of
+        :term:`predict`. By default, :term:`predict_proba` is tried first
+        and we revert to :term:`decision_function` if it doesn't exist.
+
+    sample_weight : array-like of shape (n_samples,), default=None
+        Sample weights are used to calculate weighted means when averaging the
+        model output. If `None`, then samples are equally weighted. Note that
+        `sample_weight` does not change the individual predictions.
+
+    Returns
+    -------
+    averaged_predictions : array-like of shape (n_targets, n_points)
+        The averaged predictions for the given `grid` of features values.
+        Note that `n_targets` is the number of targets (e.g. 1 for binary
+        classification, `n_tasks` for multi-output regression, and `n_classes` for
+        multiclass classification) and `n_points` is the number of points in the `grid`.
+
+    predictions : array-like
+        The predictions for the given `grid` of features values over the samples
+        from `X`. For non-multioutput regression and binary classification the
+        shape is `(n_instances, n_points)` and for multi-output regression and
+        multiclass classification the shape is `(n_targets, n_instances, n_points)`,
+        where `n_targets` is the number of targets (`n_tasks` for multi-output
+        regression, and `n_classes` for multiclass classification), `n_instances`
+        is the number of instances in `X`, and `n_points` is the number of points
+        in the `grid`.
+    """
+    predictions = []
+    averaged_predictions = []
+
+    # define the prediction_method (predict, predict_proba, decision_function).
+    if is_regressor(est):
+        prediction_method = est.predict
+    else:
+        predict_proba = getattr(est, "predict_proba", None)
+        decision_function = getattr(est, "decision_function", None)
+        if response_method == "auto":
+            # try predict_proba, then decision_function if it doesn't exist
+            prediction_method = predict_proba or decision_function
+        else:
+            prediction_method = (
+                predict_proba
+                if response_method == "predict_proba"
+                else decision_function
+            )
+        if prediction_method is None:
+            if response_method == "auto":
+                raise ValueError(
+                    "The estimator has no predict_proba and no "
+                    "decision_function method."
+                )
+            elif response_method == "predict_proba":
+                raise ValueError("The estimator has no predict_proba method.")
+            else:
+                raise ValueError("The estimator has no decision_function method.")
+
+    X_eval = X.copy()
+    for new_values in grid:
+        for i, variable in enumerate(features):
+            _safe_assign(X_eval, new_values[i], column_indexer=variable)
+
+        try:
+            # Note: predictions is of shape
+            # (n_points,) for non-multioutput regressors
+            # (n_points, n_tasks) for multioutput regressors
+            # (n_points, 1) for the regressors in cross_decomposition (I think)
+            # (n_points, 2) for binary classification
+            # (n_points, n_classes) for multiclass classification
+            pred = prediction_method(X_eval)
+
+            predictions.append(pred)
+            # average over samples
+            averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight))
+        except NotFittedError as e:
+            raise ValueError("'estimator' parameter must be a fitted estimator") from e
+
+    n_samples = X.shape[0]
+
+    # reshape to (n_targets, n_instances, n_points) where n_targets is:
+    # - 1 for non-multioutput regression and binary classification (shape is
+    #   already correct in those cases)
+    # - n_tasks for multi-output regression
+    # - n_classes for multiclass classification.
+    predictions = np.array(predictions).T
+    if is_regressor(est) and predictions.ndim == 2:
+        # non-multioutput regression, shape is (n_instances, n_points,)
+        predictions = predictions.reshape(n_samples, -1)
+    elif is_classifier(est) and predictions.shape[0] == 2:
+        # Binary classification, shape is (2, n_instances, n_points).
+        # we output the effect of **positive** class
+        predictions = predictions[1]
+        predictions = predictions.reshape(n_samples, -1)
+
+    # reshape averaged_predictions to (n_targets, n_points) where n_targets is:
+    # - 1 for non-multioutput regression and binary classification (shape is
+    #   already correct in those cases)
+    # - n_tasks for multi-output regression
+    # - n_classes for multiclass classification.
+    averaged_predictions = np.array(averaged_predictions).T
+    if is_regressor(est) and averaged_predictions.ndim == 1:
+        # non-multioutput regression, shape is (n_points,)
+        averaged_predictions = averaged_predictions.reshape(1, -1)
+    elif is_classifier(est) and averaged_predictions.shape[0] == 2:
+        # Binary classification, shape is (2, n_points).
+        # we output the effect of **positive** class
+        averaged_predictions = averaged_predictions[1]
+        averaged_predictions = averaged_predictions.reshape(1, -1)
+
+    return averaged_predictions, predictions
+
+
+@validate_params(
+    {
+        "estimator": [
+            HasMethods(["fit", "predict"]),
+            HasMethods(["fit", "predict_proba"]),
+            HasMethods(["fit", "decision_function"]),
+        ],
+        "X": ["array-like", "sparse matrix"],
+        "features": ["array-like", Integral, str],
+        "sample_weight": ["array-like", None],
+        "categorical_features": ["array-like", None],
+        "feature_names": ["array-like", None],
+        "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})],
+        "percentiles": [tuple],
+        "grid_resolution": [Interval(Integral, 1, None, closed="left")],
+        "method": [StrOptions({"auto", "recursion", "brute"})],
+        "kind": [StrOptions({"average", "individual", "both"})],
+    },
+    prefer_skip_nested_validation=True,
+)
+def partial_dependence(
+    estimator,
+    X,
+    features,
+    *,
+    sample_weight=None,
+    categorical_features=None,
+    feature_names=None,
+    response_method="auto",
+    percentiles=(0.05, 0.95),
+    grid_resolution=100,
+    method="auto",
+    kind="average",
+):
+    """Partial dependence of ``features``.
+
+    Partial dependence of a feature (or a set of features) corresponds to
+    the average response of an estimator for each possible value of the
+    feature.
+
+    Read more in the :ref:`User Guide <partial_dependence>`.
+
+    .. warning::
+
+        For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
+        :class:`~sklearn.ensemble.GradientBoostingRegressor`, the
+        `'recursion'` method (used by default) will not account for the `init`
+        predictor of the boosting process. In practice, this will produce
+        the same values as `'brute'` up to a constant offset in the target
+        response, provided that `init` is a constant estimator (which is the
+        default). However, if `init` is not a constant estimator, the
+        partial dependence values are incorrect for `'recursion'` because the
+        offset will be sample-dependent. It is preferable to use the `'brute'`
+        method. Note that this only applies to
+        :class:`~sklearn.ensemble.GradientBoostingClassifier` and
+        :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
+        :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
+        :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
+
+    Parameters
+    ----------
+    estimator : BaseEstimator
+        A fitted estimator object implementing :term:`predict`,
+        :term:`predict_proba`, or :term:`decision_function`.
+        Multioutput-multiclass classifiers are not supported.
+
+    X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features)
+        ``X`` is used to generate a grid of values for the target
+        ``features`` (where the partial dependence will be evaluated), and
+        also to generate values for the complement features when the
+        `method` is 'brute'.
+
+    features : array-like of {int, str, bool} or int or str
+        The feature (e.g. `[0]`) or pair of interacting features
+        (e.g. `[(0, 1)]`) for which the partial dependency should be computed.
+
+    sample_weight : array-like of shape (n_samples,), default=None
+        Sample weights are used to calculate weighted means when averaging the
+        model output. If `None`, then samples are equally weighted. If
+        `sample_weight` is not `None`, then `method` will be set to `'brute'`.
+        Note that `sample_weight` is ignored for `kind='individual'`.
+
+        .. versionadded:: 1.3
+
+    categorical_features : array-like of shape (n_features,) or shape \
+            (n_categorical_features,), dtype={bool, int, str}, default=None
+        Indicates the categorical features.
+
+        - `None`: no feature will be considered categorical;
+        - boolean array-like: boolean mask of shape `(n_features,)`
+          indicating which features are categorical. Thus, this array has
+          the same shape has `X.shape[1]`;
+        - integer or string array-like: integer indices or strings
+          indicating categorical features.
+
+        .. versionadded:: 1.2
+
+    feature_names : array-like of shape (n_features,), dtype=str, default=None
+        Name of each feature; `feature_names[i]` holds the name of the feature
+        with index `i`.
+        By default, the name of the feature corresponds to their numerical
+        index for NumPy array and their column name for pandas dataframe.
+
+        .. versionadded:: 1.2
+
+    response_method : {'auto', 'predict_proba', 'decision_function'}, \
+            default='auto'
+        Specifies whether to use :term:`predict_proba` or
+        :term:`decision_function` as the target response. For regressors
+        this parameter is ignored and the response is always the output of
+        :term:`predict`. By default, :term:`predict_proba` is tried first
+        and we revert to :term:`decision_function` if it doesn't exist. If
+        ``method`` is 'recursion', the response is always the output of
+        :term:`decision_function`.
+
+    percentiles : tuple of float, default=(0.05, 0.95)
+        The lower and upper percentile used to create the extreme values
+        for the grid. Must be in [0, 1].
+
+    grid_resolution : int, default=100
+        The number of equally spaced points on the grid, for each target
+        feature.
+
+    method : {'auto', 'recursion', 'brute'}, default='auto'
+        The method used to calculate the averaged predictions:
+
+        - `'recursion'` is only supported for some tree-based estimators
+          (namely
+          :class:`~sklearn.ensemble.GradientBoostingClassifier`,
+          :class:`~sklearn.ensemble.GradientBoostingRegressor`,
+          :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
+          :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
+          :class:`~sklearn.tree.DecisionTreeRegressor`,
+          :class:`~sklearn.ensemble.RandomForestRegressor`,
+          ) when `kind='average'`.
+          This is more efficient in terms of speed.
+          With this method, the target response of a
+          classifier is always the decision function, not the predicted
+          probabilities. Since the `'recursion'` method implicitly computes
+          the average of the Individual Conditional Expectation (ICE) by
+          design, it is not compatible with ICE and thus `kind` must be
+          `'average'`.
+
+        - `'brute'` is supported for any estimator, but is more
+          computationally intensive.
+
+        - `'auto'`: the `'recursion'` is used for estimators that support it,
+          and `'brute'` is used otherwise. If `sample_weight` is not `None`,
+          then `'brute'` is used regardless of the estimator.
+
+        Please see :ref:`this note <pdp_method_differences>` for
+        differences between the `'brute'` and `'recursion'` method.
+
+    kind : {'average', 'individual', 'both'}, default='average'
+        Whether to return the partial dependence averaged across all the
+        samples in the dataset or one value per sample or both.
+        See Returns below.
+
+        Note that the fast `method='recursion'` option is only available for
+        `kind='average'` and `sample_weights=None`. Computing individual
+        dependencies and doing weighted averages requires using the slower
+        `method='brute'`.
+
+        .. versionadded:: 0.24
+
+    Returns
+    -------
+    predictions : :class:`~sklearn.utils.Bunch`
+        Dictionary-like object, with the following attributes.
+
+        individual : ndarray of shape (n_outputs, n_instances, \
+                len(values[0]), len(values[1]), ...)
+            The predictions for all the points in the grid for all
+            samples in X. This is also known as Individual
+            Conditional Expectation (ICE).
+            Only available when `kind='individual'` or `kind='both'`.
+
+        average : ndarray of shape (n_outputs, len(values[0]), \
+                len(values[1]), ...)
+            The predictions for all the points in the grid, averaged
+            over all samples in X (or over the training data if
+            `method` is 'recursion').
+            Only available when `kind='average'` or `kind='both'`.
+
+        values : seq of 1d ndarrays
+            The values with which the grid has been created.
+
+            .. deprecated:: 1.3
+                The key `values` has been deprecated in 1.3 and will be removed
+                in 1.5 in favor of `grid_values`. See `grid_values` for details
+                about the `values` attribute.
+
+        grid_values : seq of 1d ndarrays
+            The values with which the grid has been created. The generated
+            grid is a cartesian product of the arrays in `grid_values` where
+            `len(grid_values) == len(features)`. The size of each array
+            `grid_values[j]` is either `grid_resolution`, or the number of
+            unique values in `X[:, j]`, whichever is smaller.
+
+            .. versionadded:: 1.3
+
+        `n_outputs` corresponds to the number of classes in a multi-class
+        setting, or to the number of tasks for multi-output regression.
+        For classical regression and binary classification `n_outputs==1`.
+        `n_values_feature_j` corresponds to the size `grid_values[j]`.
+
+    See Also
+    --------
+    PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
+    PartialDependenceDisplay : Partial Dependence visualization.
+
+    Examples
+    --------
+    >>> X = [[0, 0, 2], [1, 0, 0]]
+    >>> y = [0, 1]
+    >>> from sklearn.ensemble import GradientBoostingClassifier
+    >>> gb = GradientBoostingClassifier(random_state=0).fit(X, y)
+    >>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1),
+    ...                    grid_resolution=2) # doctest: +SKIP
+    (array([[-4.52...,  4.52...]]), [array([ 0., 1.])])
+    """
+    check_is_fitted(estimator)
+
+    if not (is_classifier(estimator) or is_regressor(estimator)):
+        raise ValueError("'estimator' must be a fitted regressor or classifier.")
+
+    if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray):
+        raise ValueError("Multiclass-multioutput estimators are not supported")
+
+    # Use check_array only on lists and other non-array-likes / sparse. Do not
+    # convert DataFrame into a NumPy array.
+    if not (hasattr(X, "__array__") or sparse.issparse(X)):
+        X = check_array(X, force_all_finite="allow-nan", dtype=object)
+
+    if is_regressor(estimator) and response_method != "auto":
+        raise ValueError(
+            "The response_method parameter is ignored for regressors and "
+            "must be 'auto'."
+        )
+
+    if kind != "average":
+        if method == "recursion":
+            raise ValueError(
+                "The 'recursion' method only applies when 'kind' is set to 'average'"
+            )
+        method = "brute"
+
+    if method == "recursion" and sample_weight is not None:
+        raise ValueError(
+            "The 'recursion' method can only be applied when sample_weight is None."
+        )
+
+    if method == "auto":
+        if sample_weight is not None:
+            method = "brute"
+        elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None:
+            method = "recursion"
+        elif isinstance(
+            estimator,
+            (BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor),
+        ):
+            method = "recursion"
+        else:
+            method = "brute"
+
+    if method == "recursion":
+        if not isinstance(
+            estimator,
+            (
+                BaseGradientBoosting,
+                BaseHistGradientBoosting,
+                DecisionTreeRegressor,
+                RandomForestRegressor,
+            ),
+        ):
+            supported_classes_recursion = (
+                "GradientBoostingClassifier",
+                "GradientBoostingRegressor",
+                "HistGradientBoostingClassifier",
+                "HistGradientBoostingRegressor",
+                "HistGradientBoostingRegressor",
+                "DecisionTreeRegressor",
+                "RandomForestRegressor",
+            )
+            raise ValueError(
+                "Only the following estimators support the 'recursion' "
+                "method: {}. Try using method='brute'.".format(
+                    ", ".join(supported_classes_recursion)
+                )
+            )
+        if response_method == "auto":
+            response_method = "decision_function"
+
+        if response_method != "decision_function":
+            raise ValueError(
+                "With the 'recursion' method, the response_method must be "
+                "'decision_function'. Got {}.".format(response_method)
+            )
+
+    if sample_weight is not None:
+        sample_weight = _check_sample_weight(sample_weight, X)
+
+    if _determine_key_type(features, accept_slice=False) == "int":
+        # _get_column_indices() supports negative indexing. Here, we limit
+        # the indexing to be positive. The upper bound will be checked
+        # by _get_column_indices()
+        if np.any(np.less(features, 0)):
+            raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1))
+
+    features_indices = np.asarray(
+        _get_column_indices(X, features), dtype=np.int32, order="C"
+    ).ravel()
+
+    feature_names = _check_feature_names(X, feature_names)
+
+    n_features = X.shape[1]
+    if categorical_features is None:
+        is_categorical = [False] * len(features_indices)
+    else:
+        categorical_features = np.asarray(categorical_features)
+        if categorical_features.dtype.kind == "b":
+            # categorical features provided as a list of boolean
+            if categorical_features.size != n_features:
+                raise ValueError(
+                    "When `categorical_features` is a boolean array-like, "
+                    "the array should be of shape (n_features,). Got "
+                    f"{categorical_features.size} elements while `X` contains "
+                    f"{n_features} features."
+                )
+            is_categorical = [categorical_features[idx] for idx in features_indices]
+        elif categorical_features.dtype.kind in ("i", "O", "U"):
+            # categorical features provided as a list of indices or feature names
+            categorical_features_idx = [
+                _get_feature_index(cat, feature_names=feature_names)
+                for cat in categorical_features
+            ]
+            is_categorical = [
+                idx in categorical_features_idx for idx in features_indices
+            ]
+        else:
+            raise ValueError(
+                "Expected `categorical_features` to be an array-like of boolean,"
+                f" integer, or string. Got {categorical_features.dtype} instead."
+            )
+
+    grid, values = _grid_from_X(
+        _safe_indexing(X, features_indices, axis=1),
+        percentiles,
+        is_categorical,
+        grid_resolution,
+    )
+
+    if method == "brute":
+        averaged_predictions, predictions = _partial_dependence_brute(
+            estimator, grid, features_indices, X, response_method, sample_weight
+        )
+
+        # reshape predictions to
+        # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
+        predictions = predictions.reshape(
+            -1, X.shape[0], *[val.shape[0] for val in values]
+        )
+    else:
+        averaged_predictions = _partial_dependence_recursion(
+            estimator, grid, features_indices
+        )
+
+    # reshape averaged_predictions to
+    # (n_outputs, n_values_feature_0, n_values_feature_1, ...)
+    averaged_predictions = averaged_predictions.reshape(
+        -1, *[val.shape[0] for val in values]
+    )
+    pdp_results = Bunch()
+
+    msg = (
+        "Key: 'values', is deprecated in 1.3 and will be removed in 1.5. "
+        "Please use 'grid_values' instead."
+    )
+    pdp_results._set_deprecated(
+        values, new_key="grid_values", deprecated_key="values", warning_message=msg
+    )
+
+    if kind == "average":
+        pdp_results["average"] = averaged_predictions
+    elif kind == "individual":
+        pdp_results["individual"] = predictions
+    else:  # kind='both'
+        pdp_results["average"] = averaged_predictions
+        pdp_results["individual"] = predictions
+
+    return pdp_results
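
Taken together with the docstring above, here is a short hedged sketch of calling this `partial_dependence` and reading the returned Bunch keys (`average`, `grid_values`, and `individual` when `kind='both'`); the toy estimator and data are illustrative only:

# Sketch: call partial_dependence as documented above and inspect the Bunch it returns.
from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.inspection import partial_dependence

X, y = make_regression(n_samples=200, n_features=4, random_state=0)
reg = HistGradientBoostingRegressor(random_state=0).fit(X, y)

# method='auto' resolves to the fast 'recursion' path for this estimator when kind='average'.
pd_avg = partial_dependence(reg, X, features=[0], grid_resolution=10)
print(pd_avg["average"].shape)       # (1, n_grid_points)
print(pd_avg["grid_values"][0][:3])  # first grid points used for feature 0

# kind='both' forces the 'brute' path and additionally returns per-sample ICE curves.
pd_both = partial_dependence(reg, X, features=[0], grid_resolution=10, kind="both")
print(pd_both["individual"].shape)   # (1, n_samples, n_grid_points)
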
venv/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py
ADDED
@@ -0,0 +1,64 @@
+def _check_feature_names(X, feature_names=None):
+    """Check feature names.
+
+    Parameters
+    ----------
+    X : array-like of shape (n_samples, n_features)
+        Input data.
+
+    feature_names : None or array-like of shape (n_names,), dtype=str
+        Feature names to check or `None`.
+
+    Returns
+    -------
+    feature_names : list of str
+        Feature names validated. If `feature_names` is `None`, then a list of
+        feature names is provided, i.e. the column names of a pandas dataframe
+        or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a
+        NumPy array.
+    """
+    if feature_names is None:
+        if hasattr(X, "columns") and hasattr(X.columns, "tolist"):
+            # get the column names for a pandas dataframe
+            feature_names = X.columns.tolist()
+        else:
+            # define a list of numbered indices for a numpy array
+            feature_names = [f"x{i}" for i in range(X.shape[1])]
+    elif hasattr(feature_names, "tolist"):
+        # convert numpy array or pandas index to a list
+        feature_names = feature_names.tolist()
+    if len(set(feature_names)) != len(feature_names):
+        raise ValueError("feature_names should not contain duplicates.")
+
+    return feature_names
+
+
+def _get_feature_index(fx, feature_names=None):
+    """Get feature index.
+
+    Parameters
+    ----------
+    fx : int or str
+        Feature index or name.
+
+    feature_names : list of str, default=None
+        All feature names from which to search the indices.
+
+    Returns
+    -------
+    idx : int
+        Feature index.
+    """
+    if isinstance(fx, str):
+        if feature_names is None:
+            raise ValueError(
+                f"Cannot plot partial dependence for feature {fx!r} since "
+                "the list of feature names was not provided, neither as "
+                "column names of a pandas data-frame nor via the feature_names "
+                "parameter."
+            )
+        try:
+            return feature_names.index(fx)
+        except ValueError as e:
+            raise ValueError(f"Feature {fx!r} not in feature_names") from e
+    return fx
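
A short illustration of the two private helpers above (internal API, so subject to change between releases): with no explicit names a NumPy input gets generic `x0`, `x1`, ... names, and string lookups map back to positional indices.

# Sketch: the _pd_utils helpers in isolation, shown for illustration only.
import numpy as np
from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index

X = np.zeros((5, 3))
names = _check_feature_names(X)
print(names)                                          # ['x0', 'x1', 'x2']
print(_get_feature_index("x2", feature_names=names))  # 2
print(_get_feature_index(1))                          # integer indices pass through unchanged
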
venv/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py
ADDED
@@ -0,0 +1,317 @@
+"""Permutation importance for estimators."""
+
+import numbers
+
+import numpy as np
+
+from ..ensemble._bagging import _generate_indices
+from ..metrics import check_scoring, get_scorer_names
+from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
+from ..model_selection._validation import _aggregate_score_dicts
+from ..utils import Bunch, _safe_indexing, check_array, check_random_state
+from ..utils._param_validation import (
+    HasMethods,
+    Integral,
+    Interval,
+    RealNotInt,
+    StrOptions,
+    validate_params,
+)
+from ..utils.parallel import Parallel, delayed
+
+
+def _weights_scorer(scorer, estimator, X, y, sample_weight):
+    if sample_weight is not None:
+        return scorer(estimator, X, y, sample_weight=sample_weight)
+    return scorer(estimator, X, y)
+
+
+def _calculate_permutation_scores(
+    estimator,
+    X,
+    y,
+    sample_weight,
+    col_idx,
+    random_state,
+    n_repeats,
+    scorer,
+    max_samples,
+):
+    """Calculate score when `col_idx` is permuted."""
+    random_state = check_random_state(random_state)
+
+    # Work on a copy of X to ensure thread-safety in case of threading based
+    # parallelism. Furthermore, making a copy is also useful when the joblib
+    # backend is 'loky' (default) or the old 'multiprocessing': in those cases,
+    # if X is large it will be automatically be backed by a readonly memory map
+    # (memmap). X.copy() on the other hand is always guaranteed to return a
+    # writable data-structure whose columns can be shuffled inplace.
+    if max_samples < X.shape[0]:
+        row_indices = _generate_indices(
+            random_state=random_state,
+            bootstrap=False,
+            n_population=X.shape[0],
+            n_samples=max_samples,
+        )
+        X_permuted = _safe_indexing(X, row_indices, axis=0)
+        y = _safe_indexing(y, row_indices, axis=0)
+        if sample_weight is not None:
+            sample_weight = _safe_indexing(sample_weight, row_indices, axis=0)
+    else:
+        X_permuted = X.copy()
+
+    scores = []
+    shuffling_idx = np.arange(X_permuted.shape[0])
+    for _ in range(n_repeats):
+        random_state.shuffle(shuffling_idx)
+        if hasattr(X_permuted, "iloc"):
+            col = X_permuted.iloc[shuffling_idx, col_idx]
+            col.index = X_permuted.index
+            X_permuted[X_permuted.columns[col_idx]] = col
+        else:
+            X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx]
+        scores.append(_weights_scorer(scorer, estimator, X_permuted, y, sample_weight))
+
+    if isinstance(scores[0], dict):
+        scores = _aggregate_score_dicts(scores)
+    else:
+        scores = np.array(scores)
+
+    return scores
+
+
+def _create_importances_bunch(baseline_score, permuted_score):
+    """Compute the importances as the decrease in score.
+
+    Parameters
+    ----------
+    baseline_score : ndarray of shape (n_features,)
+        The baseline score without permutation.
+    permuted_score : ndarray of shape (n_features, n_repeats)
+        The permuted scores for the `n` repetitions.
+
+    Returns
+    -------
+    importances : :class:`~sklearn.utils.Bunch`
+        Dictionary-like object, with the following attributes.
+        importances_mean : ndarray, shape (n_features, )
+            Mean of feature importance over `n_repeats`.
+        importances_std : ndarray, shape (n_features, )
+            Standard deviation over `n_repeats`.
+        importances : ndarray, shape (n_features, n_repeats)
+            Raw permutation importance scores.
+    """
+    importances = baseline_score - permuted_score
+    return Bunch(
+        importances_mean=np.mean(importances, axis=1),
+        importances_std=np.std(importances, axis=1),
+        importances=importances,
+    )
+
+
+@validate_params(
+    {
+        "estimator": [HasMethods(["fit"])],
+        "X": ["array-like"],
+        "y": ["array-like", None],
+        "scoring": [
+            StrOptions(set(get_scorer_names())),
+            callable,
+            list,
+            tuple,
+            dict,
+            None,
+        ],
+        "n_repeats": [Interval(Integral, 1, None, closed="left")],
+        "n_jobs": [Integral, None],
+        "random_state": ["random_state"],
+        "sample_weight": ["array-like", None],
+        "max_samples": [
+            Interval(Integral, 1, None, closed="left"),
+            Interval(RealNotInt, 0, 1, closed="right"),
+        ],
+    },
+    prefer_skip_nested_validation=True,
+)
+def permutation_importance(
+    estimator,
+    X,
+    y,
+    *,
+    scoring=None,
+    n_repeats=5,
+    n_jobs=None,
+    random_state=None,
+    sample_weight=None,
+    max_samples=1.0,
+):
+    """Permutation importance for feature evaluation [BRE]_.
+
+    The :term:`estimator` is required to be a fitted estimator. `X` can be the
+    data set used to train the estimator or a hold-out set. The permutation
+    importance of a feature is calculated as follows. First, a baseline metric,
+    defined by :term:`scoring`, is evaluated on a (potentially different)
+    dataset defined by the `X`. Next, a feature column from the validation set
+    is permuted and the metric is evaluated again. The permutation importance
+    is defined to be the difference between the baseline metric and metric from
+    permutating the feature column.
+
+    Read more in the :ref:`User Guide <permutation_importance>`.
+
+    Parameters
+    ----------
+    estimator : object
+        An estimator that has already been :term:`fitted` and is compatible
+        with :term:`scorer`.
+
+    X : ndarray or DataFrame, shape (n_samples, n_features)
+        Data on which permutation importance will be computed.
+
+    y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
+        Targets for supervised or `None` for unsupervised.
+
+    scoring : str, callable, list, tuple, or dict, default=None
+        Scorer to use.
+        If `scoring` represents a single score, one can use:
+
+        - a single string (see :ref:`scoring_parameter`);
+        - a callable (see :ref:`scoring`) that returns a single value.
+
+        If `scoring` represents multiple scores, one can use:
+
+        - a list or tuple of unique strings;
+        - a callable returning a dictionary where the keys are the metric
+          names and the values are the metric scores;
+        - a dictionary with metric names as keys and callables a values.
+
+        Passing multiple scores to `scoring` is more efficient than calling
+        `permutation_importance` for each of the scores as it reuses
+        predictions to avoid redundant computation.
+
+        If None, the estimator's default scorer is used.
+
+    n_repeats : int, default=5
+        Number of times to permute a feature.
+
+    n_jobs : int or None, default=None
+        Number of jobs to run in parallel. The computation is done by computing
+        permutation score for each columns and parallelized over the columns.
+        `None` means 1 unless in a :obj:`joblib.parallel_backend` context.
+        `-1` means using all processors. See :term:`Glossary <n_jobs>`
+        for more details.
+
+    random_state : int, RandomState instance, default=None
+        Pseudo-random number generator to control the permutations of each
+        feature.
+        Pass an int to get reproducible results across function calls.
+        See :term:`Glossary <random_state>`.
+
+    sample_weight : array-like of shape (n_samples,), default=None
+        Sample weights used in scoring.
+
+        .. versionadded:: 0.24
+
+    max_samples : int or float, default=1.0
+        The number of samples to draw from X to compute feature importance
+        in each repeat (without replacement).
+
+        - If int, then draw `max_samples` samples.
+        - If float, then draw `max_samples * X.shape[0]` samples.
+        - If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples
+          will be used.
+
+        While using this option may provide less accurate importance estimates,
+        it keeps the method tractable when evaluating feature importance on
+        large datasets. In combination with `n_repeats`, this allows to control
+        the computational speed vs statistical accuracy trade-off of this method.
+
+        .. versionadded:: 1.0
+
+    Returns
+    -------
+    result : :class:`~sklearn.utils.Bunch` or dict of such instances
+        Dictionary-like object, with the following attributes.
+
+        importances_mean : ndarray of shape (n_features, )
+            Mean of feature importance over `n_repeats`.
+        importances_std : ndarray of shape (n_features, )
+            Standard deviation over `n_repeats`.
+        importances : ndarray of shape (n_features, n_repeats)
+            Raw permutation importance scores.
+
+        If there are multiple scoring metrics in the scoring parameter
+        `result` is a dict with scorer names as keys (e.g. 'roc_auc') and
+        `Bunch` objects like above as values.
+
+    References
+    ----------
+    .. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
+        2001. <10.1023/A:1010933404324>`
+
+    Examples
+    --------
+    >>> from sklearn.linear_model import LogisticRegression
+    >>> from sklearn.inspection import permutation_importance
+    >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
+    ...      [0, 9, 9],[0, 9, 9],[0, 9, 9]]
+    >>> y = [1, 1, 1, 0, 0, 0]
+    >>> clf = LogisticRegression().fit(X, y)
+    >>> result = permutation_importance(clf, X, y, n_repeats=10,
+    ...                                 random_state=0)
+    >>> result.importances_mean
+    array([0.4666..., 0.       , 0.       ])
+    >>> result.importances_std
+    array([0.2211..., 0.       , 0.       ])
+    """
+    if not hasattr(X, "iloc"):
+        X = check_array(X, force_all_finite="allow-nan", dtype=None)
+
+    # Precompute random seed from the random state to be used
+    # to get a fresh independent RandomState instance for each
+    # parallel call to _calculate_permutation_scores, irrespective of
+    # the fact that variables are shared or not depending on the active
+    # joblib backend (sequential, thread-based or process-based).
+    random_state = check_random_state(random_state)
+    random_seed = random_state.randint(np.iinfo(np.int32).max + 1)
+
+    if not isinstance(max_samples, numbers.Integral):
+        max_samples = int(max_samples * X.shape[0])
+    elif max_samples > X.shape[0]:
+        raise ValueError("max_samples must be <= n_samples")
+
+    if callable(scoring):
+        scorer = scoring
|
284 |
+
elif scoring is None or isinstance(scoring, str):
|
285 |
+
scorer = check_scoring(estimator, scoring=scoring)
|
286 |
+
else:
|
287 |
+
scorers_dict = _check_multimetric_scoring(estimator, scoring)
|
288 |
+
scorer = _MultimetricScorer(scorers=scorers_dict)
|
289 |
+
|
290 |
+
baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)
|
291 |
+
|
292 |
+
scores = Parallel(n_jobs=n_jobs)(
|
293 |
+
delayed(_calculate_permutation_scores)(
|
294 |
+
estimator,
|
295 |
+
X,
|
296 |
+
y,
|
297 |
+
sample_weight,
|
298 |
+
col_idx,
|
299 |
+
random_seed,
|
300 |
+
n_repeats,
|
301 |
+
scorer,
|
302 |
+
max_samples,
|
303 |
+
)
|
304 |
+
for col_idx in range(X.shape[1])
|
305 |
+
)
|
306 |
+
|
307 |
+
if isinstance(baseline_score, dict):
|
308 |
+
return {
|
309 |
+
name: _create_importances_bunch(
|
310 |
+
baseline_score[name],
|
311 |
+
# unpack the permuted scores
|
312 |
+
np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]),
|
313 |
+
)
|
314 |
+
for name in baseline_score
|
315 |
+
}
|
316 |
+
else:
|
317 |
+
return _create_importances_bunch(baseline_score, np.array(scores))
|
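The file above ends the vendored `permutation_importance` implementation. As an editorial orientation note (not part of the uploaded diff), here is a minimal usage sketch in the same doctest style as the docstring above. It only relies on the public scikit-learn API shown in that docstring; the dataset and estimator choices (`make_classification`, `RandomForestClassifier`) are illustrative assumptions.

>>> from sklearn.datasets import make_classification
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.inspection import permutation_importance
>>> X, y = make_classification(n_samples=200, n_features=5, random_state=0)
>>> clf = RandomForestClassifier(random_state=0).fit(X, y)
>>> # subsample 80% of the rows in each of the 5 shuffling repeats
>>> result = permutation_importance(clf, X, y, scoring="accuracy",
...                                 n_repeats=5, random_state=0, max_samples=0.8)
>>> result.importances.shape  # (n_features, n_repeats)
(5, 5)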
venv/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py
ADDED
@@ -0,0 +1,406 @@
import numpy as np

from ...base import is_regressor
from ...preprocessing import LabelEncoder
from ...utils import _safe_indexing, check_matplotlib_support
from ...utils._response import _get_response_values
from ...utils.validation import (
    _is_arraylike_not_scalar,
    _num_features,
    check_is_fitted,
)


def _check_boundary_response_method(estimator, response_method, class_of_interest):
    """Validate the response methods to be used with the fitted estimator.

    Parameters
    ----------
    estimator : object
        Fitted estimator to check.

    response_method : {'auto', 'predict_proba', 'decision_function', 'predict'}
        Specifies whether to use :term:`predict_proba`,
        :term:`decision_function`, :term:`predict` as the target response.
        If set to 'auto', the response method is tried in the following order:
        :term:`decision_function`, :term:`predict_proba`, :term:`predict`.

    class_of_interest : int, float, bool, str or None
        The class considered when plotting the decision. If the label is specified, it
        is then possible to plot the decision boundary in multiclass settings.

        .. versionadded:: 1.4

    Returns
    -------
    prediction_method : list of str or str
        The name or list of names of the response methods to use.
    """
    has_classes = hasattr(estimator, "classes_")
    if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]):
        msg = "Multi-label and multi-output multi-class classifiers are not supported"
        raise ValueError(msg)

    if has_classes and len(estimator.classes_) > 2:
        if response_method not in {"auto", "predict"} and class_of_interest is None:
            msg = (
                "Multiclass classifiers are only supported when `response_method` is "
                "'predict' or 'auto'. Else you must provide `class_of_interest` to "
                "plot the decision boundary of a specific class."
            )
            raise ValueError(msg)
        prediction_method = "predict" if response_method == "auto" else response_method
    elif response_method == "auto":
        if is_regressor(estimator):
            prediction_method = "predict"
        else:
            prediction_method = ["decision_function", "predict_proba", "predict"]
    else:
        prediction_method = response_method

    return prediction_method


class DecisionBoundaryDisplay:
    """Decision boundary visualization.

    It is recommended to use
    :func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator`
    to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as
    attributes.

    Read more in the :ref:`User Guide <visualizations>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    xx0 : ndarray of shape (grid_resolution, grid_resolution)
        First output of :func:`meshgrid <numpy.meshgrid>`.

    xx1 : ndarray of shape (grid_resolution, grid_resolution)
        Second output of :func:`meshgrid <numpy.meshgrid>`.

    response : ndarray of shape (grid_resolution, grid_resolution)
        Values of the response function.

    xlabel : str, default=None
        Default label to place on x axis.

    ylabel : str, default=None
        Default label to place on y axis.

    Attributes
    ----------
    surface_ : matplotlib `QuadContourSet` or `QuadMesh`
        If `plot_method` is 'contour' or 'contourf', `surface_` is a
        :class:`QuadContourSet <matplotlib.contour.QuadContourSet>`. If
        `plot_method` is 'pcolormesh', `surface_` is a
        :class:`QuadMesh <matplotlib.collections.QuadMesh>`.

    ax_ : matplotlib Axes
        Axes with decision boundary.

    figure_ : matplotlib Figure
        Figure containing the decision boundary.

    See Also
    --------
    DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.inspection import DecisionBoundaryDisplay
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> iris = load_iris()
    >>> feature_1, feature_2 = np.meshgrid(
    ...     np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()),
    ...     np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max())
    ... )
    >>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T
    >>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target)
    >>> y_pred = np.reshape(tree.predict(grid), feature_1.shape)
    >>> display = DecisionBoundaryDisplay(
    ...     xx0=feature_1, xx1=feature_2, response=y_pred
    ... )
    >>> display.plot()
    <...>
    >>> display.ax_.scatter(
    ...     iris.data[:, 0], iris.data[:, 1], c=iris.target, edgecolor="black"
    ... )
    <...>
    >>> plt.show()
    """

    def __init__(self, *, xx0, xx1, response, xlabel=None, ylabel=None):
        self.xx0 = xx0
        self.xx1 = xx1
        self.response = response
        self.xlabel = xlabel
        self.ylabel = ylabel

    def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
        """Plot visualization.

        Parameters
        ----------
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        xlabel : str, default=None
            Overwrite the x-axis label.

        ylabel : str, default=None
            Overwrite the y-axis label.

        **kwargs : dict
            Additional keyword arguments to be passed to the `plot_method`.

        Returns
        -------
        display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores computed values.
        """
        check_matplotlib_support("DecisionBoundaryDisplay.plot")
        import matplotlib.pyplot as plt  # noqa

        if plot_method not in ("contourf", "contour", "pcolormesh"):
            raise ValueError(
                "plot_method must be 'contourf', 'contour', or 'pcolormesh'"
            )

        if ax is None:
            _, ax = plt.subplots()

        plot_func = getattr(ax, plot_method)
        self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)

        if xlabel is not None or not ax.get_xlabel():
            xlabel = self.xlabel if xlabel is None else xlabel
            ax.set_xlabel(xlabel)
        if ylabel is not None or not ax.get_ylabel():
            ylabel = self.ylabel if ylabel is None else ylabel
            ax.set_ylabel(ylabel)

        self.ax_ = ax
        self.figure_ = ax.figure
        return self

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        *,
        grid_resolution=100,
        eps=1.0,
        plot_method="contourf",
        response_method="auto",
        class_of_interest=None,
        xlabel=None,
        ylabel=None,
        ax=None,
        **kwargs,
    ):
        """Plot decision boundary given an estimator.

        Read more in the :ref:`User Guide <visualizations>`.

        Parameters
        ----------
        estimator : object
            Trained estimator used to plot the decision boundary.

        X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
            Input data that should be only 2-dimensional.

        grid_resolution : int, default=100
            Number of grid points to use for plotting decision boundary.
            Higher values will make the plot look nicer but be slower to
            render.

        eps : float, default=1.0
            Extends the minimum and maximum values of X for evaluating the
            response function.

        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        response_method : {'auto', 'predict_proba', 'decision_function', \
                'predict'}, default='auto'
            Specifies whether to use :term:`predict_proba`,
            :term:`decision_function`, :term:`predict` as the target response.
            If set to 'auto', the response method is tried in the following order:
            :term:`decision_function`, :term:`predict_proba`, :term:`predict`.
            For multiclass problems, :term:`predict` is selected when
            `response_method="auto"`.

        class_of_interest : int, float, bool or str, default=None
            The class considered when plotting the decision. If None,
            `estimator.classes_[1]` is considered as the positive class
            for binary classifiers. For multiclass classifiers, passing
            an explicit value for `class_of_interest` is mandatory.

            .. versionadded:: 1.4

        xlabel : str, default=None
            The label used for the x-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ylabel : str, default=None
            The label used for the y-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Additional keyword arguments to be passed to the
            `plot_method`.

        Returns
        -------
        display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores the result.

        See Also
        --------
        DecisionBoundaryDisplay : Decision boundary visualization.
        sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the
            confusion matrix given an estimator, the data, and the label.
        sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the
            confusion matrix given the true and predicted labels.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.inspection import DecisionBoundaryDisplay
        >>> iris = load_iris()
        >>> X = iris.data[:, :2]
        >>> classifier = LogisticRegression().fit(X, iris.target)
        >>> disp = DecisionBoundaryDisplay.from_estimator(
        ...     classifier, X, response_method="predict",
        ...     xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
        ...     alpha=0.5,
        ... )
        >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
        <...>
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        check_is_fitted(estimator)

        if not grid_resolution > 1:
            raise ValueError(
                "grid_resolution must be greater than 1. Got"
                f" {grid_resolution} instead."
            )

        if not eps >= 0:
            raise ValueError(
                f"eps must be greater than or equal to 0. Got {eps} instead."
            )

        possible_plot_methods = ("contourf", "contour", "pcolormesh")
        if plot_method not in possible_plot_methods:
            available_methods = ", ".join(possible_plot_methods)
            raise ValueError(
                f"plot_method must be one of {available_methods}. "
                f"Got {plot_method} instead."
            )

        num_features = _num_features(X)
        if num_features != 2:
            raise ValueError(
                f"n_features must be equal to 2. Got {num_features} instead."
            )

        x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)

        x0_min, x0_max = x0.min() - eps, x0.max() + eps
        x1_min, x1_max = x1.min() - eps, x1.max() + eps

        xx0, xx1 = np.meshgrid(
            np.linspace(x0_min, x0_max, grid_resolution),
            np.linspace(x1_min, x1_max, grid_resolution),
        )
        if hasattr(X, "iloc"):
            # we need to preserve the feature names and therefore get an empty dataframe
            X_grid = X.iloc[[], :].copy()
            X_grid.iloc[:, 0] = xx0.ravel()
            X_grid.iloc[:, 1] = xx1.ravel()
        else:
            X_grid = np.c_[xx0.ravel(), xx1.ravel()]

        prediction_method = _check_boundary_response_method(
            estimator, response_method, class_of_interest
        )
        try:
            response, _, response_method_used = _get_response_values(
                estimator,
                X_grid,
                response_method=prediction_method,
                pos_label=class_of_interest,
                return_response_method_used=True,
            )
        except ValueError as exc:
            if "is not a valid label" in str(exc):
                # re-raise a more informative error message since `pos_label` is unknown
                # to our user when interacting with
                # `DecisionBoundaryDisplay.from_estimator`
                raise ValueError(
                    f"class_of_interest={class_of_interest} is not a valid label: It "
                    f"should be one of {estimator.classes_}"
                ) from exc
            raise

        # convert classes predictions into integers
        if response_method_used == "predict" and hasattr(estimator, "classes_"):
            encoder = LabelEncoder()
            encoder.classes_ = estimator.classes_
            response = encoder.transform(response)

        if response.ndim != 1:
            if is_regressor(estimator):
                raise ValueError("Multi-output regressors are not supported")

            # For the multiclass case, `_get_response_values` returns the response
            # as-is. Thus, we have a column per class and we need to select the column
            # corresponding to the positive class.
            col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0]
            response = response[:, col_idx]

        if xlabel is None:
            xlabel = X.columns[0] if hasattr(X, "columns") else ""

        if ylabel is None:
            ylabel = X.columns[1] if hasattr(X, "columns") else ""

        display = cls(
            xx0=xx0,
            xx1=xx1,
            response=response.reshape(xx0.shape),
            xlabel=xlabel,
            ylabel=ylabel,
        )
        return display.plot(ax=ax, plot_method=plot_method, **kwargs)
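As an editorial aside (not part of the uploaded diff): the class above is typically driven through `from_estimator`, which builds the evaluation grid and calls `plot` for you. The following doctest-style sketch mirrors the docstring examples; the dataset and estimator (`make_moons`, `SVC`) are illustrative assumptions and require matplotlib to be installed.

>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_moons
>>> from sklearn.svm import SVC
>>> from sklearn.inspection import DecisionBoundaryDisplay
>>> X, y = make_moons(noise=0.2, random_state=0)
>>> clf = SVC(gamma=2).fit(X, y)
>>> # plot the signed distance to the separating surface on a 100x100 grid
>>> disp = DecisionBoundaryDisplay.from_estimator(
...     clf, X, response_method="decision_function", plot_method="contourf", alpha=0.4
... )
>>> _ = disp.ax_.scatter(X[:, 0], X[:, 1], c=y, edgecolor="k")
>>> plt.show()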
venv/lib/python3.10/site-packages/sklearn/inspection/_plot/partial_dependence.py
ADDED
@@ -0,0 +1,1473 @@
import numbers
from itertools import chain
from math import ceil

import numpy as np
from scipy import sparse
from scipy.stats.mstats import mquantiles

from ...base import is_regressor
from ...utils import (
    Bunch,
    _safe_indexing,
    check_array,
    check_matplotlib_support,  # noqa
    check_random_state,
)
from ...utils._encode import _unique
from ...utils.parallel import Parallel, delayed
from .. import partial_dependence
from .._pd_utils import _check_feature_names, _get_feature_index


class PartialDependenceDisplay:
    """Partial Dependence Plot (PDP).

    This can also display individual partial dependencies which are often
    referred to as: Individual Condition Expectation (ICE).

    It is recommended to use
    :func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to create a
    :class:`~sklearn.inspection.PartialDependenceDisplay`. All parameters are
    stored as attributes.

    Read more in
    :ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py`
    and the :ref:`User Guide <partial_dependence>`.

    .. versionadded:: 0.22

    Parameters
    ----------
    pd_results : list of Bunch
        Results of :func:`~sklearn.inspection.partial_dependence` for
        ``features``.

    features : list of (int,) or list of (int, int)
        Indices of features for a given plot. A tuple of one integer will plot
        a partial dependence curve of one feature. A tuple of two integers will
        plot a two-way partial dependence curve as a contour plot.

    feature_names : list of str
        Feature names corresponding to the indices in ``features``.

    target_idx : int

        - In a multiclass setting, specifies the class for which the PDPs
          should be computed. Note that for binary classification, the
          positive class (index 1) is always used.
        - In a multioutput setting, specifies the task for which the PDPs
          should be computed.

        Ignored in binary classification or classical regression settings.

    deciles : dict
        Deciles for feature indices in ``features``.

    kind : {'average', 'individual', 'both'} or list of such str, \
            default='average'
        Whether to plot the partial dependence averaged across all the samples
        in the dataset or one line per sample or both.

        - ``kind='average'`` results in the traditional PD plot;
        - ``kind='individual'`` results in the ICE plot;
        - ``kind='both'`` results in plotting both the ICE and PD on the same
          plot.

        A list of such strings can be provided to specify `kind` on a per-plot
        basis. The length of the list should be the same as the number of
        interactions requested in `features`.

        .. note::
           ICE ('individual' or 'both') is not a valid option for two-way
           interaction plots. As a result, an error will be raised.
           Two-way interaction plots should always be configured to
           use the 'average' kind instead.

        .. note::
           The fast ``method='recursion'`` option is only available for
           `kind='average'` and `sample_weights=None`. Computing individual
           dependencies and doing weighted averages requires using the slower
           `method='brute'`.

        .. versionadded:: 0.24
           Add `kind` parameter with `'average'`, `'individual'`, and `'both'`
           options.

        .. versionadded:: 1.1
           Add the possibility to pass a list of string specifying `kind`
           for each plot.

    subsample : float, int or None, default=1000
        Sampling for ICE curves when `kind` is 'individual' or 'both'.
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to be used to plot ICE curves. If int, represents the
        maximum absolute number of samples to use.

        Note that the full dataset is still used to calculate partial
        dependence when `kind='both'`.

        .. versionadded:: 0.24

    random_state : int, RandomState instance or None, default=None
        Controls the randomness of the selected samples when subsamples is not
        `None`. See :term:`Glossary <random_state>` for details.

        .. versionadded:: 0.24

    is_categorical : list of (bool,) or list of (bool, bool), default=None
        Whether each target feature in `features` is categorical or not.
        The list should be same size as `features`. If `None`, all features
        are assumed to be continuous.

        .. versionadded:: 1.2

    Attributes
    ----------
    bounding_ax_ : matplotlib Axes or None
        If `ax` is an axes or None, the `bounding_ax_` is the axes where the
        grid of partial dependence plots are drawn. If `ax` is a list of axes
        or a numpy array of axes, `bounding_ax_` is None.

    axes_ : ndarray of matplotlib Axes
        If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row
        and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item
        in `ax`. Elements that are None correspond to a nonexisting axes in
        that position.

    lines_ : ndarray of matplotlib Artists
        If `ax` is an axes or None, `lines_[i, j]` is the partial dependence
        curve on the i-th row and j-th column. If `ax` is a list of axes,
        `lines_[i]` is the partial dependence curve corresponding to the i-th
        item in `ax`. Elements that are None correspond to a nonexisting axes
        or an axes that does not include a line plot.

    deciles_vlines_ : ndarray of matplotlib LineCollection
        If `ax` is an axes or None, `vlines_[i, j]` is the line collection
        representing the x axis deciles of the i-th row and j-th column. If
        `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
        `ax`. Elements that are None correspond to a nonexisting axes or an
        axes that does not include a PDP plot.

        .. versionadded:: 0.23

    deciles_hlines_ : ndarray of matplotlib LineCollection
        If `ax` is an axes or None, `vlines_[i, j]` is the line collection
        representing the y axis deciles of the i-th row and j-th column. If
        `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
        `ax`. Elements that are None correspond to a nonexisting axes or an
        axes that does not include a 2-way plot.

        .. versionadded:: 0.23

    contours_ : ndarray of matplotlib Artists
        If `ax` is an axes or None, `contours_[i, j]` is the partial dependence
        plot on the i-th row and j-th column. If `ax` is a list of axes,
        `contours_[i]` is the partial dependence plot corresponding to the i-th
        item in `ax`. Elements that are None correspond to a nonexisting axes
        or an axes that does not include a contour plot.

    bars_ : ndarray of matplotlib Artists
        If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar
        plot on the i-th row and j-th column (for a categorical feature).
        If `ax` is a list of axes, `bars_[i]` is the partial dependence bar
        plot corresponding to the i-th item in `ax`. Elements that are None
        correspond to a nonexisting axes or an axes that does not include a
        bar plot.

        .. versionadded:: 1.2

    heatmaps_ : ndarray of matplotlib Artists
        If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence
        heatmap on the i-th row and j-th column (for a pair of categorical
        features). If `ax` is a list of axes, `heatmaps_[i]` is the partial
        dependence heatmap corresponding to the i-th item in `ax`. Elements
        that are None correspond to a nonexisting axes or an axes that does not
        include a heatmap.

        .. versionadded:: 1.2

    figure_ : matplotlib Figure
        Figure containing partial dependence plots.

    See Also
    --------
    partial_dependence : Compute Partial Dependence values.
    PartialDependenceDisplay.from_estimator : Plot Partial Dependence.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> from sklearn.inspection import PartialDependenceDisplay
    >>> from sklearn.inspection import partial_dependence
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])]
    >>> deciles = {0: np.linspace(0, 1, num=5)}
    >>> pd_results = partial_dependence(
    ...     clf, X, features=0, kind="average", grid_resolution=5)
    >>> display = PartialDependenceDisplay(
    ...     [pd_results], features=features, feature_names=feature_names,
    ...     target_idx=0, deciles=deciles
    ... )
    >>> display.plot(pdp_lim={1: (-1.38, 0.66)})
    <...>
    >>> plt.show()
    """

    def __init__(
        self,
        pd_results,
        *,
        features,
        feature_names,
        target_idx,
        deciles,
        kind="average",
        subsample=1000,
        random_state=None,
        is_categorical=None,
    ):
        self.pd_results = pd_results
        self.features = features
        self.feature_names = feature_names
        self.target_idx = target_idx
        self.deciles = deciles
        self.kind = kind
        self.subsample = subsample
        self.random_state = random_state
        self.is_categorical = is_categorical

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        features,
        *,
        sample_weight=None,
        categorical_features=None,
        feature_names=None,
        target=None,
        response_method="auto",
        n_cols=3,
        grid_resolution=100,
        percentiles=(0.05, 0.95),
        method="auto",
        n_jobs=None,
        verbose=0,
        line_kw=None,
        ice_lines_kw=None,
        pd_line_kw=None,
        contour_kw=None,
        ax=None,
        kind="average",
        centered=False,
        subsample=1000,
        random_state=None,
    ):
        """Partial dependence (PD) and individual conditional expectation (ICE) plots.

        Partial dependence plots, individual conditional expectation plots or an
        overlay of both of them can be plotted by setting the ``kind``
        parameter. The ``len(features)`` plots are arranged in a grid with
        ``n_cols`` columns. Two-way partial dependence plots are plotted as
        contour plots. The deciles of the feature values will be shown with tick
        marks on the x-axes for one-way plots, and on both axes for two-way
        plots.

        Read more in the :ref:`User Guide <partial_dependence>`.

        .. note::

            :func:`PartialDependenceDisplay.from_estimator` does not support using the
            same axes with multiple calls. To plot the partial dependence for
            multiple estimators, please pass the axes created by the first call to the
            second call::

              >>> from sklearn.inspection import PartialDependenceDisplay
              >>> from sklearn.datasets import make_friedman1
              >>> from sklearn.linear_model import LinearRegression
              >>> from sklearn.ensemble import RandomForestRegressor
              >>> X, y = make_friedman1()
              >>> est1 = LinearRegression().fit(X, y)
              >>> est2 = RandomForestRegressor().fit(X, y)
              >>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,
              ...                                                 [1, 2])
              >>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],
              ...                                                 ax=disp1.axes_)

        .. warning::

            For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
            :class:`~sklearn.ensemble.GradientBoostingRegressor`, the
            `'recursion'` method (used by default) will not account for the `init`
            predictor of the boosting process. In practice, this will produce
            the same values as `'brute'` up to a constant offset in the target
            response, provided that `init` is a constant estimator (which is the
            default). However, if `init` is not a constant estimator, the
            partial dependence values are incorrect for `'recursion'` because the
            offset will be sample-dependent. It is preferable to use the `'brute'`
            method. Note that this only applies to
            :class:`~sklearn.ensemble.GradientBoostingClassifier` and
            :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
            :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
            :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.

        .. versionadded:: 1.0

        Parameters
        ----------
        estimator : BaseEstimator
            A fitted estimator object implementing :term:`predict`,
            :term:`predict_proba`, or :term:`decision_function`.
            Multioutput-multiclass classifiers are not supported.

        X : {array-like, dataframe} of shape (n_samples, n_features)
            ``X`` is used to generate a grid of values for the target
            ``features`` (where the partial dependence will be evaluated), and
            also to generate values for the complement features when the
            `method` is `'brute'`.

        features : list of {int, str, pair of int, pair of str}
            The target features for which to create the PDPs.
            If `features[i]` is an integer or a string, a one-way PDP is created;
            if `features[i]` is a tuple, a two-way PDP is created (only supported
            with `kind='average'`). Each tuple must be of size 2.
            If any entry is a string, then it must be in ``feature_names``.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights are used to calculate weighted means when averaging the
            model output. If `None`, then samples are equally weighted. If
            `sample_weight` is not `None`, then `method` will be set to `'brute'`.
            Note that `sample_weight` is ignored for `kind='individual'`.

            .. versionadded:: 1.3

        categorical_features : array-like of shape (n_features,) or shape \
                (n_categorical_features,), dtype={bool, int, str}, default=None
            Indicates the categorical features.

            - `None`: no feature will be considered categorical;
            - boolean array-like: boolean mask of shape `(n_features,)`
              indicating which features are categorical. Thus, this array has
              the same shape as `X.shape[1]`;
            - integer or string array-like: integer indices or strings
              indicating categorical features.

            .. versionadded:: 1.2

        feature_names : array-like of shape (n_features,), dtype=str, default=None
            Name of each feature; `feature_names[i]` holds the name of the feature
            with index `i`.
            By default, the name of the feature corresponds to their numerical
            index for NumPy array and their column name for pandas dataframe.

        target : int, default=None
            - In a multiclass setting, specifies the class for which the PDPs
              should be computed. Note that for binary classification, the
              positive class (index 1) is always used.
            - In a multioutput setting, specifies the task for which the PDPs
              should be computed.

            Ignored in binary classification or classical regression settings.

        response_method : {'auto', 'predict_proba', 'decision_function'}, \
                default='auto'
            Specifies whether to use :term:`predict_proba` or
            :term:`decision_function` as the target response. For regressors
            this parameter is ignored and the response is always the output of
            :term:`predict`. By default, :term:`predict_proba` is tried first
            and we revert to :term:`decision_function` if it doesn't exist. If
            ``method`` is `'recursion'`, the response is always the output of
            :term:`decision_function`.

        n_cols : int, default=3
            The maximum number of columns in the grid plot. Only active when `ax`
            is a single axis or `None`.

        grid_resolution : int, default=100
            The number of equally spaced points on the axes of the plots, for each
            target feature.

        percentiles : tuple of float, default=(0.05, 0.95)
            The lower and upper percentile used to create the extreme values
            for the PDP axes. Must be in [0, 1].

        method : str, default='auto'
            The method used to calculate the averaged predictions:

            - `'recursion'` is only supported for some tree-based estimators
              (namely
              :class:`~sklearn.ensemble.GradientBoostingClassifier`,
              :class:`~sklearn.ensemble.GradientBoostingRegressor`,
              :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
              :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
              :class:`~sklearn.tree.DecisionTreeRegressor`,
              :class:`~sklearn.ensemble.RandomForestRegressor`
              but is more efficient in terms of speed.
              With this method, the target response of a
              classifier is always the decision function, not the predicted
              probabilities. Since the `'recursion'` method implicitly computes
              the average of the ICEs by design, it is not compatible with ICE and
              thus `kind` must be `'average'`.

            - `'brute'` is supported for any estimator, but is more
              computationally intensive.

            - `'auto'`: the `'recursion'` is used for estimators that support it,
              and `'brute'` is used otherwise. If `sample_weight` is not `None`,
              then `'brute'` is used regardless of the estimator.

            Please see :ref:`this note <pdp_method_differences>` for
            differences between the `'brute'` and `'recursion'` method.

        n_jobs : int, default=None
            The number of CPUs to use to compute the partial dependences.
            Computation is parallelized over features specified by the `features`
            parameter.

            ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
            ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
            for more details.

        verbose : int, default=0
            Verbose output during PD computations.

        line_kw : dict, default=None
            Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
            For one-way partial dependence plots. It can be used to define common
            properties for both `ice_lines_kw` and `pdp_line_kw`.

        ice_lines_kw : dict, default=None
            Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
            For ICE lines in the one-way partial dependence plots.
            The key value pairs defined in `ice_lines_kw` take priority over
            `line_kw`.

        pd_line_kw : dict, default=None
            Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
            For partial dependence in one-way partial dependence plots.
            The key value pairs defined in `pd_line_kw` take priority over
            `line_kw`.

        contour_kw : dict, default=None
            Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
            For two-way partial dependence plots.

        ax : Matplotlib axes or array-like of Matplotlib axes, default=None
            - If a single axis is passed in, it is treated as a bounding axes
              and a grid of partial dependence plots will be drawn within
              these bounds. The `n_cols` parameter controls the number of
              columns in the grid.
            - If an array-like of axes are passed in, the partial dependence
              plots will be drawn directly into these axes.
            - If `None`, a figure and a bounding axes is created and treated
              as the single axes case.

        kind : {'average', 'individual', 'both'}, default='average'
            Whether to plot the partial dependence averaged across all the samples
            in the dataset or one line per sample or both.

            - ``kind='average'`` results in the traditional PD plot;
            - ``kind='individual'`` results in the ICE plot.

            Note that the fast `method='recursion'` option is only available for
            `kind='average'` and `sample_weights=None`. Computing individual
            dependencies and doing weighted averages requires using the slower
            `method='brute'`.

        centered : bool, default=False
            If `True`, the ICE and PD lines will start at the origin of the
            y-axis. By default, no centering is done.

            .. versionadded:: 1.1

        subsample : float, int or None, default=1000
            Sampling for ICE curves when `kind` is 'individual' or 'both'.
            If `float`, should be between 0.0 and 1.0 and represent the proportion
            of the dataset to be used to plot ICE curves. If `int`, represents the
            absolute number of samples to use.

            Note that the full dataset is still used to calculate averaged partial
            dependence when `kind='both'`.

        random_state : int, RandomState instance or None, default=None
            Controls the randomness of the selected samples when subsamples is not
            `None` and `kind` is either `'both'` or `'individual'`.
            See :term:`Glossary <random_state>` for details.

        Returns
        -------
        display : :class:`~sklearn.inspection.PartialDependenceDisplay`

        See Also
        --------
        partial_dependence : Compute Partial Dependence values.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_friedman1
        >>> from sklearn.ensemble import GradientBoostingRegressor
        >>> from sklearn.inspection import PartialDependenceDisplay
        >>> X, y = make_friedman1()
        >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
        >>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)])
        <...>
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")  # noqa
        import matplotlib.pyplot as plt  # noqa

        # set target_idx for multi-class estimators
        if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2:
            if target is None:
                raise ValueError("target must be specified for multi-class")
            target_idx = np.searchsorted(estimator.classes_, target)
            if (
                not (0 <= target_idx < len(estimator.classes_))
                or estimator.classes_[target_idx] != target
            ):
                raise ValueError("target not in est.classes_, got {}".format(target))
        else:
            # regression and binary classification
            target_idx = 0

        # Use check_array only on lists and other non-array-likes / sparse. Do not
        # convert DataFrame into a NumPy array.
        if not (hasattr(X, "__array__") or sparse.issparse(X)):
            X = check_array(X, force_all_finite="allow-nan", dtype=object)
        n_features = X.shape[1]

        feature_names = _check_feature_names(X, feature_names)
        # expand kind to always be a list of str
        kind_ = [kind] * len(features) if isinstance(kind, str) else kind
        if len(kind_) != len(features):
            raise ValueError(
                "When `kind` is provided as a list of strings, it should contain "
                f"as many elements as `features`. `kind` contains {len(kind_)} "
                f"element(s) and `features` contains {len(features)} element(s)."
            )

        # convert features into a seq of int tuples
        tmp_features, ice_for_two_way_pd = [], []
        for kind_plot, fxs in zip(kind_, features):
            if isinstance(fxs, (numbers.Integral, str)):
                fxs = (fxs,)
            try:
                fxs = tuple(
                    _get_feature_index(fx, feature_names=feature_names) for fx in fxs
                )
            except TypeError as e:
                raise ValueError(
                    "Each entry in features must be either an int, "
                    "a string, or an iterable of size at most 2."
                ) from e
            if not 1 <= np.size(fxs) <= 2:
                raise ValueError(
                    "Each entry in features must be either an int, "
                    "a string, or an iterable of size at most 2."
                )
            # store the information if 2-way PD was requested with ICE to later
            # raise a ValueError with an exhaustive list of problematic
            # settings.
            ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1)

            tmp_features.append(fxs)

        if any(ice_for_two_way_pd):
            # raise an error and be specific regarding the parameter values
            # when 1- and 2-way PD were requested
            kind_ = [
                "average" if forcing_average else kind_plot
                for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_)
            ]
            raise ValueError(
                "ICE plot cannot be rendered for 2-way feature interactions. "
                "2-way feature interactions mandates PD plots using the "
                "'average' kind: "
                f"features={features!r} should be configured to use "
                f"kind={kind_!r} explicitly."
            )
        features = tmp_features

        if categorical_features is None:
            is_categorical = [
                (False,) if len(fxs) == 1 else (False, False) for fxs in features
            ]
        else:
            # we need to create a boolean indicator of which features are
            # categorical from the categorical_features list.
            categorical_features = np.asarray(categorical_features)
            if categorical_features.dtype.kind == "b":
                # categorical features provided as a list of boolean
                if categorical_features.size != n_features:
                    raise ValueError(
                        "When `categorical_features` is a boolean array-like, "
                        "the array should be of shape (n_features,). Got "
                        f"{categorical_features.size} elements while `X` contains "
                        f"{n_features} features."
                    )
                is_categorical = [
                    tuple(categorical_features[fx] for fx in fxs) for fxs in features
                ]
            elif categorical_features.dtype.kind in ("i", "O", "U"):
                # categorical features provided as a list of indices or feature names
                categorical_features_idx = [
                    _get_feature_index(cat, feature_names=feature_names)
                    for cat in categorical_features
                ]
                is_categorical = [
                    tuple([idx in categorical_features_idx for idx in fxs])
                    for fxs in features
                ]
            else:
                raise ValueError(
                    "Expected `categorical_features` to be an array-like of boolean,"
                    f" integer, or string. Got {categorical_features.dtype} instead."
                )

            for cats in is_categorical:
                if np.size(cats) == 2 and (cats[0] != cats[1]):
                    raise ValueError(
                        "Two-way partial dependence plots are not supported for pairs"
                        " of continuous and categorical features."
                    )

            # collect the indices of the categorical features targeted by the partial
            # dependence computation
            categorical_features_targeted = set(
                [
                    fx
                    for fxs, cats in zip(features, is_categorical)
                    for fx in fxs
                    if any(cats)
                ]
            )
            if categorical_features_targeted:
                min_n_cats = min(
                    [
                        len(_unique(_safe_indexing(X, idx, axis=1)))
                        for idx in categorical_features_targeted
                    ]
                )
                if grid_resolution < min_n_cats:
                    raise ValueError(
                        "The resolution of the computed grid is less than the "
                        "minimum number of categories in the targeted categorical "
                        "features. Expect the `grid_resolution` to be greater than "
                        f"{min_n_cats}. Got {grid_resolution} instead."
                    )

            for is_cat, kind_plot in zip(is_categorical, kind_):
                if any(is_cat) and kind_plot != "average":
                    raise ValueError(
                        "It is not possible to display individual effects for"
                        " categorical features."
                    )

        # Early exit if the axes does not have the correct number of axes
        if ax is not None and not isinstance(ax, plt.Axes):
            axes = np.asarray(ax, dtype=object)
            if axes.size != len(features):
                raise ValueError(
                    "Expected ax to have {} axes, got {}".format(
                        len(features), axes.size
                    )
                )

        for i in chain.from_iterable(features):
            if i >= len(feature_names):
                raise ValueError(
                    "All entries of features must be less than "
                    "len(feature_names) = {0}, got {1}.".format(len(feature_names), i)
                )

        if isinstance(subsample, numbers.Integral):
            if subsample <= 0:
                raise ValueError(
                    f"When an integer, subsample={subsample} should be positive."
                )
        elif isinstance(subsample, numbers.Real):
            if subsample <= 0 or subsample >= 1:
                raise ValueError(
                    f"When a floating-point, subsample={subsample} should be in "
                    "the (0, 1) range."
                )

        # compute predictions and/or averaged predictions
        pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
            delayed(partial_dependence)(
                estimator,
                X,
                fxs,
                sample_weight=sample_weight,
                feature_names=feature_names,
                categorical_features=categorical_features,
                response_method=response_method,
                method=method,
                grid_resolution=grid_resolution,
|
711 |
+
response_method=response_method,
|
712 |
+
method=method,
|
713 |
+
grid_resolution=grid_resolution,
|
714 |
+
percentiles=percentiles,
|
715 |
+
kind=kind_plot,
|
716 |
+
)
|
717 |
+
for kind_plot, fxs in zip(kind_, features)
|
718 |
+
)
|
719 |
+
|
720 |
+
# For multioutput regression, we can only check the validity of target
|
721 |
+
# now that we have the predictions.
|
722 |
+
# Also note: as multiclass-multioutput classifiers are not supported,
|
723 |
+
# multiclass and multioutput scenario are mutually exclusive. So there is
|
724 |
+
# no risk of overwriting target_idx here.
|
725 |
+
pd_result = pd_results[0] # checking the first result is enough
|
726 |
+
n_tasks = (
|
727 |
+
pd_result.average.shape[0]
|
728 |
+
if kind_[0] == "average"
|
729 |
+
else pd_result.individual.shape[0]
|
730 |
+
)
|
731 |
+
if is_regressor(estimator) and n_tasks > 1:
|
732 |
+
if target is None:
|
733 |
+
raise ValueError("target must be specified for multi-output regressors")
|
734 |
+
if not 0 <= target <= n_tasks:
|
735 |
+
raise ValueError(
|
736 |
+
"target must be in [0, n_tasks], got {}.".format(target)
|
737 |
+
)
|
738 |
+
target_idx = target
|
739 |
+
|
740 |
+
deciles = {}
|
741 |
+
for fxs, cats in zip(features, is_categorical):
|
742 |
+
for fx, cat in zip(fxs, cats):
|
743 |
+
if not cat and fx not in deciles:
|
744 |
+
X_col = _safe_indexing(X, fx, axis=1)
|
745 |
+
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
|
746 |
+
|
747 |
+
display = cls(
|
748 |
+
pd_results=pd_results,
|
749 |
+
features=features,
|
750 |
+
feature_names=feature_names,
|
751 |
+
target_idx=target_idx,
|
752 |
+
deciles=deciles,
|
753 |
+
kind=kind,
|
754 |
+
subsample=subsample,
|
755 |
+
random_state=random_state,
|
756 |
+
is_categorical=is_categorical,
|
757 |
+
)
|
758 |
+
return display.plot(
|
759 |
+
ax=ax,
|
760 |
+
n_cols=n_cols,
|
761 |
+
line_kw=line_kw,
|
762 |
+
ice_lines_kw=ice_lines_kw,
|
763 |
+
pd_line_kw=pd_line_kw,
|
764 |
+
contour_kw=contour_kw,
|
765 |
+
centered=centered,
|
766 |
+
)
|
767 |
+
|
768 |
+
def _get_sample_count(self, n_samples):
|
769 |
+
"""Compute the number of samples as an integer."""
|
770 |
+
if isinstance(self.subsample, numbers.Integral):
|
771 |
+
if self.subsample < n_samples:
|
772 |
+
return self.subsample
|
773 |
+
return n_samples
|
774 |
+
elif isinstance(self.subsample, numbers.Real):
|
775 |
+
return ceil(n_samples * self.subsample)
|
776 |
+
return n_samples
|
777 |
+
|
778 |
+
def _plot_ice_lines(
|
779 |
+
self,
|
780 |
+
preds,
|
781 |
+
feature_values,
|
782 |
+
n_ice_to_plot,
|
783 |
+
ax,
|
784 |
+
pd_plot_idx,
|
785 |
+
n_total_lines_by_plot,
|
786 |
+
individual_line_kw,
|
787 |
+
):
|
788 |
+
"""Plot the ICE lines.
|
789 |
+
|
790 |
+
Parameters
|
791 |
+
----------
|
792 |
+
preds : ndarray of shape \
|
793 |
+
(n_instances, n_grid_points)
|
794 |
+
The predictions computed for all points of `feature_values` for a
|
795 |
+
given feature for all samples in `X`.
|
796 |
+
feature_values : ndarray of shape (n_grid_points,)
|
797 |
+
The feature values for which the predictions have been computed.
|
798 |
+
n_ice_to_plot : int
|
799 |
+
The number of ICE lines to plot.
|
800 |
+
ax : Matplotlib axes
|
801 |
+
The axis on which to plot the ICE lines.
|
802 |
+
pd_plot_idx : int
|
803 |
+
The sequential index of the plot. It will be unraveled to find the
|
804 |
+
matching 2D position in the grid layout.
|
805 |
+
n_total_lines_by_plot : int
|
806 |
+
The total number of lines expected to be plot on the axis.
|
807 |
+
individual_line_kw : dict
|
808 |
+
Dict with keywords passed when plotting the ICE lines.
|
809 |
+
"""
|
810 |
+
rng = check_random_state(self.random_state)
|
811 |
+
# subsample ice
|
812 |
+
ice_lines_idx = rng.choice(
|
813 |
+
preds.shape[0],
|
814 |
+
n_ice_to_plot,
|
815 |
+
replace=False,
|
816 |
+
)
|
817 |
+
ice_lines_subsampled = preds[ice_lines_idx, :]
|
818 |
+
# plot the subsampled ice
|
819 |
+
for ice_idx, ice in enumerate(ice_lines_subsampled):
|
820 |
+
line_idx = np.unravel_index(
|
821 |
+
pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape
|
822 |
+
)
|
823 |
+
self.lines_[line_idx] = ax.plot(
|
824 |
+
feature_values, ice.ravel(), **individual_line_kw
|
825 |
+
)[0]
|
826 |
+
|
827 |
+
def _plot_average_dependence(
|
828 |
+
self,
|
829 |
+
avg_preds,
|
830 |
+
feature_values,
|
831 |
+
ax,
|
832 |
+
pd_line_idx,
|
833 |
+
line_kw,
|
834 |
+
categorical,
|
835 |
+
bar_kw,
|
836 |
+
):
|
837 |
+
"""Plot the average partial dependence.
|
838 |
+
|
839 |
+
Parameters
|
840 |
+
----------
|
841 |
+
avg_preds : ndarray of shape (n_grid_points,)
|
842 |
+
The average predictions for all points of `feature_values` for a
|
843 |
+
given feature for all samples in `X`.
|
844 |
+
feature_values : ndarray of shape (n_grid_points,)
|
845 |
+
The feature values for which the predictions have been computed.
|
846 |
+
ax : Matplotlib axes
|
847 |
+
The axis on which to plot the average PD.
|
848 |
+
pd_line_idx : int
|
849 |
+
The sequential index of the plot. It will be unraveled to find the
|
850 |
+
matching 2D position in the grid layout.
|
851 |
+
line_kw : dict
|
852 |
+
Dict with keywords passed when plotting the PD plot.
|
853 |
+
categorical : bool
|
854 |
+
Whether feature is categorical.
|
855 |
+
bar_kw: dict
|
856 |
+
Dict with keywords passed when plotting the PD bars (categorical).
|
857 |
+
"""
|
858 |
+
if categorical:
|
859 |
+
bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape)
|
860 |
+
self.bars_[bar_idx] = ax.bar(feature_values, avg_preds, **bar_kw)[0]
|
861 |
+
ax.tick_params(axis="x", rotation=90)
|
862 |
+
else:
|
863 |
+
line_idx = np.unravel_index(pd_line_idx, self.lines_.shape)
|
864 |
+
self.lines_[line_idx] = ax.plot(
|
865 |
+
feature_values,
|
866 |
+
avg_preds,
|
867 |
+
**line_kw,
|
868 |
+
)[0]
|
869 |
+
|
870 |
+
def _plot_one_way_partial_dependence(
|
871 |
+
self,
|
872 |
+
kind,
|
873 |
+
preds,
|
874 |
+
avg_preds,
|
875 |
+
feature_values,
|
876 |
+
feature_idx,
|
877 |
+
n_ice_lines,
|
878 |
+
ax,
|
879 |
+
n_cols,
|
880 |
+
pd_plot_idx,
|
881 |
+
n_lines,
|
882 |
+
ice_lines_kw,
|
883 |
+
pd_line_kw,
|
884 |
+
categorical,
|
885 |
+
bar_kw,
|
886 |
+
pdp_lim,
|
887 |
+
):
|
888 |
+
"""Plot 1-way partial dependence: ICE and PDP.
|
889 |
+
|
890 |
+
Parameters
|
891 |
+
----------
|
892 |
+
kind : str
|
893 |
+
The kind of partial plot to draw.
|
894 |
+
preds : ndarray of shape \
|
895 |
+
(n_instances, n_grid_points) or None
|
896 |
+
The predictions computed for all points of `feature_values` for a
|
897 |
+
given feature for all samples in `X`.
|
898 |
+
avg_preds : ndarray of shape (n_grid_points,)
|
899 |
+
The average predictions for all points of `feature_values` for a
|
900 |
+
given feature for all samples in `X`.
|
901 |
+
feature_values : ndarray of shape (n_grid_points,)
|
902 |
+
The feature values for which the predictions have been computed.
|
903 |
+
feature_idx : int
|
904 |
+
The index corresponding to the target feature.
|
905 |
+
n_ice_lines : int
|
906 |
+
The number of ICE lines to plot.
|
907 |
+
ax : Matplotlib axes
|
908 |
+
The axis on which to plot the ICE and PDP lines.
|
909 |
+
n_cols : int or None
|
910 |
+
The number of column in the axis.
|
911 |
+
pd_plot_idx : int
|
912 |
+
The sequential index of the plot. It will be unraveled to find the
|
913 |
+
matching 2D position in the grid layout.
|
914 |
+
n_lines : int
|
915 |
+
The total number of lines expected to be plot on the axis.
|
916 |
+
ice_lines_kw : dict
|
917 |
+
Dict with keywords passed when plotting the ICE lines.
|
918 |
+
pd_line_kw : dict
|
919 |
+
Dict with keywords passed when plotting the PD plot.
|
920 |
+
categorical : bool
|
921 |
+
Whether feature is categorical.
|
922 |
+
bar_kw: dict
|
923 |
+
Dict with keywords passed when plotting the PD bars (categorical).
|
924 |
+
pdp_lim : dict
|
925 |
+
Global min and max average predictions, such that all plots will
|
926 |
+
have the same scale and y limits. `pdp_lim[1]` is the global min
|
927 |
+
and max for single partial dependence curves.
|
928 |
+
"""
|
929 |
+
from matplotlib import transforms # noqa
|
930 |
+
|
931 |
+
if kind in ("individual", "both"):
|
932 |
+
self._plot_ice_lines(
|
933 |
+
preds[self.target_idx],
|
934 |
+
feature_values,
|
935 |
+
n_ice_lines,
|
936 |
+
ax,
|
937 |
+
pd_plot_idx,
|
938 |
+
n_lines,
|
939 |
+
ice_lines_kw,
|
940 |
+
)
|
941 |
+
|
942 |
+
if kind in ("average", "both"):
|
943 |
+
# the average is stored as the last line
|
944 |
+
if kind == "average":
|
945 |
+
pd_line_idx = pd_plot_idx
|
946 |
+
else:
|
947 |
+
pd_line_idx = pd_plot_idx * n_lines + n_ice_lines
|
948 |
+
self._plot_average_dependence(
|
949 |
+
avg_preds[self.target_idx].ravel(),
|
950 |
+
feature_values,
|
951 |
+
ax,
|
952 |
+
pd_line_idx,
|
953 |
+
pd_line_kw,
|
954 |
+
categorical,
|
955 |
+
bar_kw,
|
956 |
+
)
|
957 |
+
|
958 |
+
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
|
959 |
+
# create the decile line for the vertical axis
|
960 |
+
vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape)
|
961 |
+
if self.deciles.get(feature_idx[0], None) is not None:
|
962 |
+
self.deciles_vlines_[vlines_idx] = ax.vlines(
|
963 |
+
self.deciles[feature_idx[0]],
|
964 |
+
0,
|
965 |
+
0.05,
|
966 |
+
transform=trans,
|
967 |
+
color="k",
|
968 |
+
)
|
969 |
+
# reset ylim which was overwritten by vlines
|
970 |
+
min_val = min(val[0] for val in pdp_lim.values())
|
971 |
+
max_val = max(val[1] for val in pdp_lim.values())
|
972 |
+
ax.set_ylim([min_val, max_val])
|
973 |
+
|
974 |
+
# Set xlabel if it is not already set
|
975 |
+
if not ax.get_xlabel():
|
976 |
+
ax.set_xlabel(self.feature_names[feature_idx[0]])
|
977 |
+
|
978 |
+
if n_cols is None or pd_plot_idx % n_cols == 0:
|
979 |
+
if not ax.get_ylabel():
|
980 |
+
ax.set_ylabel("Partial dependence")
|
981 |
+
else:
|
982 |
+
ax.set_yticklabels([])
|
983 |
+
|
984 |
+
if pd_line_kw.get("label", None) and kind != "individual" and not categorical:
|
985 |
+
ax.legend()
|
986 |
+
|
987 |
+
def _plot_two_way_partial_dependence(
|
988 |
+
self,
|
989 |
+
avg_preds,
|
990 |
+
feature_values,
|
991 |
+
feature_idx,
|
992 |
+
ax,
|
993 |
+
pd_plot_idx,
|
994 |
+
Z_level,
|
995 |
+
contour_kw,
|
996 |
+
categorical,
|
997 |
+
heatmap_kw,
|
998 |
+
):
|
999 |
+
"""Plot 2-way partial dependence.
|
1000 |
+
|
1001 |
+
Parameters
|
1002 |
+
----------
|
1003 |
+
avg_preds : ndarray of shape \
|
1004 |
+
(n_instances, n_grid_points, n_grid_points)
|
1005 |
+
The average predictions for all points of `feature_values[0]` and
|
1006 |
+
`feature_values[1]` for some given features for all samples in `X`.
|
1007 |
+
feature_values : seq of 1d array
|
1008 |
+
A sequence of array of the feature values for which the predictions
|
1009 |
+
have been computed.
|
1010 |
+
feature_idx : tuple of int
|
1011 |
+
The indices of the target features
|
1012 |
+
ax : Matplotlib axes
|
1013 |
+
The axis on which to plot the ICE and PDP lines.
|
1014 |
+
pd_plot_idx : int
|
1015 |
+
The sequential index of the plot. It will be unraveled to find the
|
1016 |
+
matching 2D position in the grid layout.
|
1017 |
+
Z_level : ndarray of shape (8, 8)
|
1018 |
+
The Z-level used to encode the average predictions.
|
1019 |
+
contour_kw : dict
|
1020 |
+
Dict with keywords passed when plotting the contours.
|
1021 |
+
categorical : bool
|
1022 |
+
Whether features are categorical.
|
1023 |
+
heatmap_kw: dict
|
1024 |
+
Dict with keywords passed when plotting the PD heatmap
|
1025 |
+
(categorical).
|
1026 |
+
"""
|
1027 |
+
if categorical:
|
1028 |
+
import matplotlib.pyplot as plt
|
1029 |
+
|
1030 |
+
default_im_kw = dict(interpolation="nearest", cmap="viridis")
|
1031 |
+
im_kw = {**default_im_kw, **heatmap_kw}
|
1032 |
+
|
1033 |
+
data = avg_preds[self.target_idx]
|
1034 |
+
im = ax.imshow(data, **im_kw)
|
1035 |
+
text = None
|
1036 |
+
cmap_min, cmap_max = im.cmap(0), im.cmap(1.0)
|
1037 |
+
|
1038 |
+
text = np.empty_like(data, dtype=object)
|
1039 |
+
# print text with appropriate color depending on background
|
1040 |
+
thresh = (data.max() + data.min()) / 2.0
|
1041 |
+
|
1042 |
+
for flat_index in range(data.size):
|
1043 |
+
row, col = np.unravel_index(flat_index, data.shape)
|
1044 |
+
color = cmap_max if data[row, col] < thresh else cmap_min
|
1045 |
+
|
1046 |
+
values_format = ".2f"
|
1047 |
+
text_data = format(data[row, col], values_format)
|
1048 |
+
|
1049 |
+
text_kwargs = dict(ha="center", va="center", color=color)
|
1050 |
+
text[row, col] = ax.text(col, row, text_data, **text_kwargs)
|
1051 |
+
|
1052 |
+
fig = ax.figure
|
1053 |
+
fig.colorbar(im, ax=ax)
|
1054 |
+
ax.set(
|
1055 |
+
xticks=np.arange(len(feature_values[1])),
|
1056 |
+
yticks=np.arange(len(feature_values[0])),
|
1057 |
+
xticklabels=feature_values[1],
|
1058 |
+
yticklabels=feature_values[0],
|
1059 |
+
xlabel=self.feature_names[feature_idx[1]],
|
1060 |
+
ylabel=self.feature_names[feature_idx[0]],
|
1061 |
+
)
|
1062 |
+
|
1063 |
+
plt.setp(ax.get_xticklabels(), rotation="vertical")
|
1064 |
+
|
1065 |
+
heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape)
|
1066 |
+
self.heatmaps_[heatmap_idx] = im
|
1067 |
+
else:
|
1068 |
+
from matplotlib import transforms # noqa
|
1069 |
+
|
1070 |
+
XX, YY = np.meshgrid(feature_values[0], feature_values[1])
|
1071 |
+
Z = avg_preds[self.target_idx].T
|
1072 |
+
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors="k")
|
1073 |
+
contour_idx = np.unravel_index(pd_plot_idx, self.contours_.shape)
|
1074 |
+
self.contours_[contour_idx] = ax.contourf(
|
1075 |
+
XX,
|
1076 |
+
YY,
|
1077 |
+
Z,
|
1078 |
+
levels=Z_level,
|
1079 |
+
vmax=Z_level[-1],
|
1080 |
+
vmin=Z_level[0],
|
1081 |
+
**contour_kw,
|
1082 |
+
)
|
1083 |
+
ax.clabel(CS, fmt="%2.2f", colors="k", fontsize=10, inline=True)
|
1084 |
+
|
1085 |
+
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
|
1086 |
+
# create the decile line for the vertical axis
|
1087 |
+
xlim, ylim = ax.get_xlim(), ax.get_ylim()
|
1088 |
+
vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape)
|
1089 |
+
self.deciles_vlines_[vlines_idx] = ax.vlines(
|
1090 |
+
self.deciles[feature_idx[0]],
|
1091 |
+
0,
|
1092 |
+
0.05,
|
1093 |
+
transform=trans,
|
1094 |
+
color="k",
|
1095 |
+
)
|
1096 |
+
# create the decile line for the horizontal axis
|
1097 |
+
hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape)
|
1098 |
+
self.deciles_hlines_[hlines_idx] = ax.hlines(
|
1099 |
+
self.deciles[feature_idx[1]],
|
1100 |
+
0,
|
1101 |
+
0.05,
|
1102 |
+
transform=trans,
|
1103 |
+
color="k",
|
1104 |
+
)
|
1105 |
+
# reset xlim and ylim since they are overwritten by hlines and
|
1106 |
+
# vlines
|
1107 |
+
ax.set_xlim(xlim)
|
1108 |
+
ax.set_ylim(ylim)
|
1109 |
+
|
1110 |
+
# set xlabel if it is not already set
|
1111 |
+
if not ax.get_xlabel():
|
1112 |
+
ax.set_xlabel(self.feature_names[feature_idx[0]])
|
1113 |
+
ax.set_ylabel(self.feature_names[feature_idx[1]])
|
1114 |
+
|
1115 |
+
def plot(
|
1116 |
+
self,
|
1117 |
+
*,
|
1118 |
+
ax=None,
|
1119 |
+
n_cols=3,
|
1120 |
+
line_kw=None,
|
1121 |
+
ice_lines_kw=None,
|
1122 |
+
pd_line_kw=None,
|
1123 |
+
contour_kw=None,
|
1124 |
+
bar_kw=None,
|
1125 |
+
heatmap_kw=None,
|
1126 |
+
pdp_lim=None,
|
1127 |
+
centered=False,
|
1128 |
+
):
|
1129 |
+
"""Plot partial dependence plots.
|
1130 |
+
|
1131 |
+
Parameters
|
1132 |
+
----------
|
1133 |
+
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
|
1134 |
+
- If a single axis is passed in, it is treated as a bounding axes
|
1135 |
+
and a grid of partial dependence plots will be drawn within
|
1136 |
+
these bounds. The `n_cols` parameter controls the number of
|
1137 |
+
columns in the grid.
|
1138 |
+
- If an array-like of axes are passed in, the partial dependence
|
1139 |
+
plots will be drawn directly into these axes.
|
1140 |
+
- If `None`, a figure and a bounding axes is created and treated
|
1141 |
+
as the single axes case.
|
1142 |
+
|
1143 |
+
n_cols : int, default=3
|
1144 |
+
The maximum number of columns in the grid plot. Only active when
|
1145 |
+
`ax` is a single axes or `None`.
|
1146 |
+
|
1147 |
+
line_kw : dict, default=None
|
1148 |
+
Dict with keywords passed to the `matplotlib.pyplot.plot` call.
|
1149 |
+
For one-way partial dependence plots.
|
1150 |
+
|
1151 |
+
ice_lines_kw : dict, default=None
|
1152 |
+
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
|
1153 |
+
For ICE lines in the one-way partial dependence plots.
|
1154 |
+
The key value pairs defined in `ice_lines_kw` takes priority over
|
1155 |
+
`line_kw`.
|
1156 |
+
|
1157 |
+
.. versionadded:: 1.0
|
1158 |
+
|
1159 |
+
pd_line_kw : dict, default=None
|
1160 |
+
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
|
1161 |
+
For partial dependence in one-way partial dependence plots.
|
1162 |
+
The key value pairs defined in `pd_line_kw` takes priority over
|
1163 |
+
`line_kw`.
|
1164 |
+
|
1165 |
+
.. versionadded:: 1.0
|
1166 |
+
|
1167 |
+
contour_kw : dict, default=None
|
1168 |
+
Dict with keywords passed to the `matplotlib.pyplot.contourf`
|
1169 |
+
call for two-way partial dependence plots.
|
1170 |
+
|
1171 |
+
bar_kw : dict, default=None
|
1172 |
+
Dict with keywords passed to the `matplotlib.pyplot.bar`
|
1173 |
+
call for one-way categorical partial dependence plots.
|
1174 |
+
|
1175 |
+
.. versionadded:: 1.2
|
1176 |
+
|
1177 |
+
heatmap_kw : dict, default=None
|
1178 |
+
Dict with keywords passed to the `matplotlib.pyplot.imshow`
|
1179 |
+
call for two-way categorical partial dependence plots.
|
1180 |
+
|
1181 |
+
.. versionadded:: 1.2
|
1182 |
+
|
1183 |
+
pdp_lim : dict, default=None
|
1184 |
+
Global min and max average predictions, such that all plots will have the
|
1185 |
+
same scale and y limits. `pdp_lim[1]` is the global min and max for single
|
1186 |
+
partial dependence curves. `pdp_lim[2]` is the global min and max for
|
1187 |
+
two-way partial dependence curves. If `None` (default), the limit will be
|
1188 |
+
inferred from the global minimum and maximum of all predictions.
|
1189 |
+
|
1190 |
+
.. versionadded:: 1.1
|
1191 |
+
|
1192 |
+
centered : bool, default=False
|
1193 |
+
If `True`, the ICE and PD lines will start at the origin of the
|
1194 |
+
y-axis. By default, no centering is done.
|
1195 |
+
|
1196 |
+
.. versionadded:: 1.1
|
1197 |
+
|
1198 |
+
Returns
|
1199 |
+
-------
|
1200 |
+
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
|
1201 |
+
Returns a :class:`~sklearn.inspection.PartialDependenceDisplay`
|
1202 |
+
object that contains the partial dependence plots.
|
1203 |
+
"""
|
1204 |
+
|
1205 |
+
check_matplotlib_support("plot_partial_dependence")
|
1206 |
+
import matplotlib.pyplot as plt # noqa
|
1207 |
+
from matplotlib.gridspec import GridSpecFromSubplotSpec # noqa
|
1208 |
+
|
1209 |
+
if isinstance(self.kind, str):
|
1210 |
+
kind = [self.kind] * len(self.features)
|
1211 |
+
else:
|
1212 |
+
kind = self.kind
|
1213 |
+
|
1214 |
+
if self.is_categorical is None:
|
1215 |
+
is_categorical = [
|
1216 |
+
(False,) if len(fx) == 1 else (False, False) for fx in self.features
|
1217 |
+
]
|
1218 |
+
else:
|
1219 |
+
is_categorical = self.is_categorical
|
1220 |
+
|
1221 |
+
if len(kind) != len(self.features):
|
1222 |
+
raise ValueError(
|
1223 |
+
"When `kind` is provided as a list of strings, it should "
|
1224 |
+
"contain as many elements as `features`. `kind` contains "
|
1225 |
+
f"{len(kind)} element(s) and `features` contains "
|
1226 |
+
f"{len(self.features)} element(s)."
|
1227 |
+
)
|
1228 |
+
|
1229 |
+
valid_kinds = {"average", "individual", "both"}
|
1230 |
+
if any([k not in valid_kinds for k in kind]):
|
1231 |
+
raise ValueError(
|
1232 |
+
f"Values provided to `kind` must be one of: {valid_kinds!r} or a list"
|
1233 |
+
f" of such values. Currently, kind={self.kind!r}"
|
1234 |
+
)
|
1235 |
+
|
1236 |
+
# Center results before plotting
|
1237 |
+
if not centered:
|
1238 |
+
pd_results_ = self.pd_results
|
1239 |
+
else:
|
1240 |
+
pd_results_ = []
|
1241 |
+
for kind_plot, pd_result in zip(kind, self.pd_results):
|
1242 |
+
current_results = {"grid_values": pd_result["grid_values"]}
|
1243 |
+
|
1244 |
+
if kind_plot in ("individual", "both"):
|
1245 |
+
preds = pd_result.individual
|
1246 |
+
preds = preds - preds[self.target_idx, :, 0, None]
|
1247 |
+
current_results["individual"] = preds
|
1248 |
+
|
1249 |
+
if kind_plot in ("average", "both"):
|
1250 |
+
avg_preds = pd_result.average
|
1251 |
+
avg_preds = avg_preds - avg_preds[self.target_idx, 0, None]
|
1252 |
+
current_results["average"] = avg_preds
|
1253 |
+
|
1254 |
+
pd_results_.append(Bunch(**current_results))
|
1255 |
+
|
1256 |
+
if pdp_lim is None:
|
1257 |
+
# get global min and max average predictions of PD grouped by plot type
|
1258 |
+
pdp_lim = {}
|
1259 |
+
for kind_plot, pdp in zip(kind, pd_results_):
|
1260 |
+
values = pdp["grid_values"]
|
1261 |
+
preds = pdp.average if kind_plot == "average" else pdp.individual
|
1262 |
+
min_pd = preds[self.target_idx].min()
|
1263 |
+
max_pd = preds[self.target_idx].max()
|
1264 |
+
|
1265 |
+
# expand the limits to account so that the plotted lines do not touch
|
1266 |
+
# the edges of the plot
|
1267 |
+
span = max_pd - min_pd
|
1268 |
+
min_pd -= 0.05 * span
|
1269 |
+
max_pd += 0.05 * span
|
1270 |
+
|
1271 |
+
n_fx = len(values)
|
1272 |
+
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
|
1273 |
+
min_pd = min(min_pd, old_min_pd)
|
1274 |
+
max_pd = max(max_pd, old_max_pd)
|
1275 |
+
pdp_lim[n_fx] = (min_pd, max_pd)
|
1276 |
+
|
1277 |
+
if line_kw is None:
|
1278 |
+
line_kw = {}
|
1279 |
+
if ice_lines_kw is None:
|
1280 |
+
ice_lines_kw = {}
|
1281 |
+
if pd_line_kw is None:
|
1282 |
+
pd_line_kw = {}
|
1283 |
+
if bar_kw is None:
|
1284 |
+
bar_kw = {}
|
1285 |
+
if heatmap_kw is None:
|
1286 |
+
heatmap_kw = {}
|
1287 |
+
|
1288 |
+
if ax is None:
|
1289 |
+
_, ax = plt.subplots()
|
1290 |
+
|
1291 |
+
if contour_kw is None:
|
1292 |
+
contour_kw = {}
|
1293 |
+
default_contour_kws = {"alpha": 0.75}
|
1294 |
+
contour_kw = {**default_contour_kws, **contour_kw}
|
1295 |
+
|
1296 |
+
n_features = len(self.features)
|
1297 |
+
is_average_plot = [kind_plot == "average" for kind_plot in kind]
|
1298 |
+
if all(is_average_plot):
|
1299 |
+
# only average plots are requested
|
1300 |
+
n_ice_lines = 0
|
1301 |
+
n_lines = 1
|
1302 |
+
else:
|
1303 |
+
# we need to determine the number of ICE samples computed
|
1304 |
+
ice_plot_idx = is_average_plot.index(False)
|
1305 |
+
n_ice_lines = self._get_sample_count(
|
1306 |
+
len(pd_results_[ice_plot_idx].individual[0])
|
1307 |
+
)
|
1308 |
+
if any([kind_plot == "both" for kind_plot in kind]):
|
1309 |
+
n_lines = n_ice_lines + 1 # account for the average line
|
1310 |
+
else:
|
1311 |
+
n_lines = n_ice_lines
|
1312 |
+
|
1313 |
+
if isinstance(ax, plt.Axes):
|
1314 |
+
# If ax was set off, it has most likely been set to off
|
1315 |
+
# by a previous call to plot.
|
1316 |
+
if not ax.axison:
|
1317 |
+
raise ValueError(
|
1318 |
+
"The ax was already used in another plot "
|
1319 |
+
"function, please set ax=display.axes_ "
|
1320 |
+
"instead"
|
1321 |
+
)
|
1322 |
+
|
1323 |
+
ax.set_axis_off()
|
1324 |
+
self.bounding_ax_ = ax
|
1325 |
+
self.figure_ = ax.figure
|
1326 |
+
|
1327 |
+
n_cols = min(n_cols, n_features)
|
1328 |
+
n_rows = int(np.ceil(n_features / float(n_cols)))
|
1329 |
+
|
1330 |
+
self.axes_ = np.empty((n_rows, n_cols), dtype=object)
|
1331 |
+
if all(is_average_plot):
|
1332 |
+
self.lines_ = np.empty((n_rows, n_cols), dtype=object)
|
1333 |
+
else:
|
1334 |
+
self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object)
|
1335 |
+
self.contours_ = np.empty((n_rows, n_cols), dtype=object)
|
1336 |
+
self.bars_ = np.empty((n_rows, n_cols), dtype=object)
|
1337 |
+
self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object)
|
1338 |
+
|
1339 |
+
axes_ravel = self.axes_.ravel()
|
1340 |
+
|
1341 |
+
gs = GridSpecFromSubplotSpec(
|
1342 |
+
n_rows, n_cols, subplot_spec=ax.get_subplotspec()
|
1343 |
+
)
|
1344 |
+
for i, spec in zip(range(n_features), gs):
|
1345 |
+
axes_ravel[i] = self.figure_.add_subplot(spec)
|
1346 |
+
|
1347 |
+
else: # array-like
|
1348 |
+
ax = np.asarray(ax, dtype=object)
|
1349 |
+
if ax.size != n_features:
|
1350 |
+
raise ValueError(
|
1351 |
+
"Expected ax to have {} axes, got {}".format(n_features, ax.size)
|
1352 |
+
)
|
1353 |
+
|
1354 |
+
if ax.ndim == 2:
|
1355 |
+
n_cols = ax.shape[1]
|
1356 |
+
else:
|
1357 |
+
n_cols = None
|
1358 |
+
|
1359 |
+
self.bounding_ax_ = None
|
1360 |
+
self.figure_ = ax.ravel()[0].figure
|
1361 |
+
self.axes_ = ax
|
1362 |
+
if all(is_average_plot):
|
1363 |
+
self.lines_ = np.empty_like(ax, dtype=object)
|
1364 |
+
else:
|
1365 |
+
self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object)
|
1366 |
+
self.contours_ = np.empty_like(ax, dtype=object)
|
1367 |
+
self.bars_ = np.empty_like(ax, dtype=object)
|
1368 |
+
self.heatmaps_ = np.empty_like(ax, dtype=object)
|
1369 |
+
|
1370 |
+
# create contour levels for two-way plots
|
1371 |
+
if 2 in pdp_lim:
|
1372 |
+
Z_level = np.linspace(*pdp_lim[2], num=8)
|
1373 |
+
|
1374 |
+
self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object)
|
1375 |
+
self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object)
|
1376 |
+
|
1377 |
+
for pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot) in enumerate(
|
1378 |
+
zip(
|
1379 |
+
self.axes_.ravel(),
|
1380 |
+
self.features,
|
1381 |
+
is_categorical,
|
1382 |
+
pd_results_,
|
1383 |
+
kind,
|
1384 |
+
)
|
1385 |
+
):
|
1386 |
+
avg_preds = None
|
1387 |
+
preds = None
|
1388 |
+
feature_values = pd_result["grid_values"]
|
1389 |
+
if kind_plot == "individual":
|
1390 |
+
preds = pd_result.individual
|
1391 |
+
elif kind_plot == "average":
|
1392 |
+
avg_preds = pd_result.average
|
1393 |
+
else: # kind_plot == 'both'
|
1394 |
+
avg_preds = pd_result.average
|
1395 |
+
preds = pd_result.individual
|
1396 |
+
|
1397 |
+
if len(feature_values) == 1:
|
1398 |
+
# define the line-style for the current plot
|
1399 |
+
default_line_kws = {
|
1400 |
+
"color": "C0",
|
1401 |
+
"label": "average" if kind_plot == "both" else None,
|
1402 |
+
}
|
1403 |
+
if kind_plot == "individual":
|
1404 |
+
default_ice_lines_kws = {"alpha": 0.3, "linewidth": 0.5}
|
1405 |
+
default_pd_lines_kws = {}
|
1406 |
+
elif kind_plot == "both":
|
1407 |
+
# by default, we need to distinguish the average line from
|
1408 |
+
# the individual lines via color and line style
|
1409 |
+
default_ice_lines_kws = {
|
1410 |
+
"alpha": 0.3,
|
1411 |
+
"linewidth": 0.5,
|
1412 |
+
"color": "tab:blue",
|
1413 |
+
}
|
1414 |
+
default_pd_lines_kws = {
|
1415 |
+
"color": "tab:orange",
|
1416 |
+
"linestyle": "--",
|
1417 |
+
}
|
1418 |
+
else:
|
1419 |
+
default_ice_lines_kws = {}
|
1420 |
+
default_pd_lines_kws = {}
|
1421 |
+
|
1422 |
+
ice_lines_kw = {
|
1423 |
+
**default_line_kws,
|
1424 |
+
**default_ice_lines_kws,
|
1425 |
+
**line_kw,
|
1426 |
+
**ice_lines_kw,
|
1427 |
+
}
|
1428 |
+
del ice_lines_kw["label"]
|
1429 |
+
|
1430 |
+
pd_line_kw = {
|
1431 |
+
**default_line_kws,
|
1432 |
+
**default_pd_lines_kws,
|
1433 |
+
**line_kw,
|
1434 |
+
**pd_line_kw,
|
1435 |
+
}
|
1436 |
+
|
1437 |
+
default_bar_kws = {"color": "C0"}
|
1438 |
+
bar_kw = {**default_bar_kws, **bar_kw}
|
1439 |
+
|
1440 |
+
default_heatmap_kw = {}
|
1441 |
+
heatmap_kw = {**default_heatmap_kw, **heatmap_kw}
|
1442 |
+
|
1443 |
+
self._plot_one_way_partial_dependence(
|
1444 |
+
kind_plot,
|
1445 |
+
preds,
|
1446 |
+
avg_preds,
|
1447 |
+
feature_values[0],
|
1448 |
+
feature_idx,
|
1449 |
+
n_ice_lines,
|
1450 |
+
axi,
|
1451 |
+
n_cols,
|
1452 |
+
pd_plot_idx,
|
1453 |
+
n_lines,
|
1454 |
+
ice_lines_kw,
|
1455 |
+
pd_line_kw,
|
1456 |
+
cat[0],
|
1457 |
+
bar_kw,
|
1458 |
+
pdp_lim,
|
1459 |
+
)
|
1460 |
+
else:
|
1461 |
+
self._plot_two_way_partial_dependence(
|
1462 |
+
avg_preds,
|
1463 |
+
feature_values,
|
1464 |
+
feature_idx,
|
1465 |
+
axi,
|
1466 |
+
pd_plot_idx,
|
1467 |
+
Z_level,
|
1468 |
+
contour_kw,
|
1469 |
+
cat[0] and cat[1],
|
1470 |
+
heatmap_kw,
|
1471 |
+
)
|
1472 |
+
|
1473 |
+
return self
|
venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py
ADDED
@@ -0,0 +1,609 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import warnings
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
from sklearn.base import BaseEstimator, ClassifierMixin
|
7 |
+
from sklearn.datasets import (
|
8 |
+
load_diabetes,
|
9 |
+
load_iris,
|
10 |
+
make_classification,
|
11 |
+
make_multilabel_classification,
|
12 |
+
)
|
13 |
+
from sklearn.ensemble import IsolationForest
|
14 |
+
from sklearn.inspection import DecisionBoundaryDisplay
|
15 |
+
from sklearn.inspection._plot.decision_boundary import _check_boundary_response_method
|
16 |
+
from sklearn.linear_model import LogisticRegression
|
17 |
+
from sklearn.preprocessing import scale
|
18 |
+
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
|
19 |
+
from sklearn.utils._testing import (
|
20 |
+
assert_allclose,
|
21 |
+
assert_array_equal,
|
22 |
+
)
|
23 |
+
|
24 |
+
# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved
|
25 |
+
pytestmark = pytest.mark.filterwarnings(
|
26 |
+
"ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:"
|
27 |
+
"matplotlib.*"
|
28 |
+
)
|
29 |
+
|
30 |
+
|
31 |
+
X, y = make_classification(
|
32 |
+
n_informative=1,
|
33 |
+
n_redundant=1,
|
34 |
+
n_clusters_per_class=1,
|
35 |
+
n_features=2,
|
36 |
+
random_state=42,
|
37 |
+
)
|
38 |
+
|
39 |
+
|
40 |
+
def load_iris_2d_scaled():
|
41 |
+
X, y = load_iris(return_X_y=True)
|
42 |
+
X = scale(X)[:, :2]
|
43 |
+
return X, y
|
44 |
+
|
45 |
+
|
46 |
+
@pytest.fixture(scope="module")
|
47 |
+
def fitted_clf():
|
48 |
+
return LogisticRegression().fit(X, y)
|
49 |
+
|
50 |
+
|
51 |
+
def test_input_data_dimension(pyplot):
|
52 |
+
"""Check that we raise an error when `X` does not have exactly 2 features."""
|
53 |
+
X, y = make_classification(n_samples=10, n_features=4, random_state=0)
|
54 |
+
|
55 |
+
clf = LogisticRegression().fit(X, y)
|
56 |
+
msg = "n_features must be equal to 2. Got 4 instead."
|
57 |
+
with pytest.raises(ValueError, match=msg):
|
58 |
+
DecisionBoundaryDisplay.from_estimator(estimator=clf, X=X)
|
59 |
+
|
60 |
+
|
61 |
+
def test_check_boundary_response_method_error():
|
62 |
+
"""Check that we raise an error for the cases not supported by
|
63 |
+
`_check_boundary_response_method`.
|
64 |
+
"""
|
65 |
+
|
66 |
+
class MultiLabelClassifier:
|
67 |
+
classes_ = [np.array([0, 1]), np.array([0, 1])]
|
68 |
+
|
69 |
+
err_msg = "Multi-label and multi-output multi-class classifiers are not supported"
|
70 |
+
with pytest.raises(ValueError, match=err_msg):
|
71 |
+
_check_boundary_response_method(MultiLabelClassifier(), "predict", None)
|
72 |
+
|
73 |
+
class MulticlassClassifier:
|
74 |
+
classes_ = [0, 1, 2]
|
75 |
+
|
76 |
+
err_msg = "Multiclass classifiers are only supported when `response_method` is"
|
77 |
+
for response_method in ("predict_proba", "decision_function"):
|
78 |
+
with pytest.raises(ValueError, match=err_msg):
|
79 |
+
_check_boundary_response_method(
|
80 |
+
MulticlassClassifier(), response_method, None
|
81 |
+
)
|
82 |
+
|
83 |
+
|
84 |
+
@pytest.mark.parametrize(
|
85 |
+
"estimator, response_method, class_of_interest, expected_prediction_method",
|
86 |
+
[
|
87 |
+
(DecisionTreeRegressor(), "predict", None, "predict"),
|
88 |
+
(DecisionTreeRegressor(), "auto", None, "predict"),
|
89 |
+
(LogisticRegression().fit(*load_iris_2d_scaled()), "predict", None, "predict"),
|
90 |
+
(LogisticRegression().fit(*load_iris_2d_scaled()), "auto", None, "predict"),
|
91 |
+
(
|
92 |
+
LogisticRegression().fit(*load_iris_2d_scaled()),
|
93 |
+
"predict_proba",
|
94 |
+
0,
|
95 |
+
"predict_proba",
|
96 |
+
),
|
97 |
+
(
|
98 |
+
LogisticRegression().fit(*load_iris_2d_scaled()),
|
99 |
+
"decision_function",
|
100 |
+
0,
|
101 |
+
"decision_function",
|
102 |
+
),
|
103 |
+
(
|
104 |
+
LogisticRegression().fit(X, y),
|
105 |
+
"auto",
|
106 |
+
None,
|
107 |
+
["decision_function", "predict_proba", "predict"],
|
108 |
+
),
|
109 |
+
(LogisticRegression().fit(X, y), "predict", None, "predict"),
|
110 |
+
(
|
111 |
+
LogisticRegression().fit(X, y),
|
112 |
+
["predict_proba", "decision_function"],
|
113 |
+
None,
|
114 |
+
["predict_proba", "decision_function"],
|
115 |
+
),
|
116 |
+
],
|
117 |
+
)
|
118 |
+
def test_check_boundary_response_method(
|
119 |
+
estimator, response_method, class_of_interest, expected_prediction_method
|
120 |
+
):
|
121 |
+
"""Check the behaviour of `_check_boundary_response_method` for the supported
|
122 |
+
cases.
|
123 |
+
"""
|
124 |
+
prediction_method = _check_boundary_response_method(
|
125 |
+
estimator, response_method, class_of_interest
|
126 |
+
)
|
127 |
+
assert prediction_method == expected_prediction_method
|
128 |
+
|
129 |
+
|
130 |
+
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
|
131 |
+
def test_multiclass_error(pyplot, response_method):
|
132 |
+
"""Check multiclass errors."""
|
133 |
+
X, y = make_classification(n_classes=3, n_informative=3, random_state=0)
|
134 |
+
X = X[:, [0, 1]]
|
135 |
+
lr = LogisticRegression().fit(X, y)
|
136 |
+
|
137 |
+
msg = (
|
138 |
+
"Multiclass classifiers are only supported when `response_method` is 'predict'"
|
139 |
+
" or 'auto'"
|
140 |
+
)
|
141 |
+
with pytest.raises(ValueError, match=msg):
|
142 |
+
DecisionBoundaryDisplay.from_estimator(lr, X, response_method=response_method)
|
143 |
+
|
144 |
+
|
145 |
+
@pytest.mark.parametrize("response_method", ["auto", "predict"])
|
146 |
+
def test_multiclass(pyplot, response_method):
|
147 |
+
"""Check multiclass gives expected results."""
|
148 |
+
grid_resolution = 10
|
149 |
+
eps = 1.0
|
150 |
+
X, y = make_classification(n_classes=3, n_informative=3, random_state=0)
|
151 |
+
X = X[:, [0, 1]]
|
152 |
+
lr = LogisticRegression(random_state=0).fit(X, y)
|
153 |
+
|
154 |
+
disp = DecisionBoundaryDisplay.from_estimator(
|
155 |
+
lr, X, response_method=response_method, grid_resolution=grid_resolution, eps=1.0
|
156 |
+
)
|
157 |
+
|
158 |
+
x0_min, x0_max = X[:, 0].min() - eps, X[:, 0].max() + eps
|
159 |
+
x1_min, x1_max = X[:, 1].min() - eps, X[:, 1].max() + eps
|
160 |
+
xx0, xx1 = np.meshgrid(
|
161 |
+
np.linspace(x0_min, x0_max, grid_resolution),
|
162 |
+
np.linspace(x1_min, x1_max, grid_resolution),
|
163 |
+
)
|
164 |
+
response = lr.predict(np.c_[xx0.ravel(), xx1.ravel()])
|
165 |
+
assert_allclose(disp.response, response.reshape(xx0.shape))
|
166 |
+
assert_allclose(disp.xx0, xx0)
|
167 |
+
assert_allclose(disp.xx1, xx1)
|
168 |
+
|
169 |
+
|
170 |
+
@pytest.mark.parametrize(
|
171 |
+
"kwargs, error_msg",
|
172 |
+
[
|
173 |
+
(
|
174 |
+
{"plot_method": "hello_world"},
|
175 |
+
r"plot_method must be one of contourf, contour, pcolormesh. Got hello_world"
|
176 |
+
r" instead.",
|
177 |
+
),
|
178 |
+
(
|
179 |
+
{"grid_resolution": 1},
|
180 |
+
r"grid_resolution must be greater than 1. Got 1 instead",
|
181 |
+
),
|
182 |
+
(
|
183 |
+
{"grid_resolution": -1},
|
184 |
+
r"grid_resolution must be greater than 1. Got -1 instead",
|
185 |
+
),
|
186 |
+
({"eps": -1.1}, r"eps must be greater than or equal to 0. Got -1.1 instead"),
|
187 |
+
],
|
188 |
+
)
|
189 |
+
def test_input_validation_errors(pyplot, kwargs, error_msg, fitted_clf):
|
190 |
+
"""Check input validation from_estimator."""
|
191 |
+
with pytest.raises(ValueError, match=error_msg):
|
192 |
+
DecisionBoundaryDisplay.from_estimator(fitted_clf, X, **kwargs)
|
193 |
+
|
194 |
+
|
195 |
+
def test_display_plot_input_error(pyplot, fitted_clf):
|
196 |
+
"""Check input validation for `plot`."""
|
197 |
+
disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, X, grid_resolution=5)
|
198 |
+
|
199 |
+
with pytest.raises(ValueError, match="plot_method must be 'contourf'"):
|
200 |
+
disp.plot(plot_method="hello_world")
|
201 |
+
|
202 |
+
|
203 |
+
@pytest.mark.parametrize(
|
204 |
+
"response_method", ["auto", "predict", "predict_proba", "decision_function"]
|
205 |
+
)
|
206 |
+
@pytest.mark.parametrize("plot_method", ["contourf", "contour"])
|
207 |
+
def test_decision_boundary_display_classifier(
|
208 |
+
pyplot, fitted_clf, response_method, plot_method
|
209 |
+
):
|
210 |
+
"""Check that decision boundary is correct."""
|
211 |
+
fig, ax = pyplot.subplots()
|
212 |
+
eps = 2.0
|
213 |
+
disp = DecisionBoundaryDisplay.from_estimator(
|
214 |
+
fitted_clf,
|
215 |
+
X,
|
216 |
+
grid_resolution=5,
|
217 |
+
response_method=response_method,
|
218 |
+
plot_method=plot_method,
|
219 |
+
eps=eps,
|
220 |
+
ax=ax,
|
221 |
+
)
|
222 |
+
assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet)
|
223 |
+
assert disp.ax_ == ax
|
224 |
+
assert disp.figure_ == fig
|
225 |
+
|
226 |
+
x0, x1 = X[:, 0], X[:, 1]
|
227 |
+
|
228 |
+
x0_min, x0_max = x0.min() - eps, x0.max() + eps
|
229 |
+
x1_min, x1_max = x1.min() - eps, x1.max() + eps
|
230 |
+
|
231 |
+
assert disp.xx0.min() == pytest.approx(x0_min)
|
232 |
+
assert disp.xx0.max() == pytest.approx(x0_max)
|
233 |
+
assert disp.xx1.min() == pytest.approx(x1_min)
|
234 |
+
assert disp.xx1.max() == pytest.approx(x1_max)
|
235 |
+
|
236 |
+
fig2, ax2 = pyplot.subplots()
|
237 |
+
# change plotting method for second plot
|
238 |
+
disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto")
|
239 |
+
assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh)
|
240 |
+
assert disp.ax_ == ax2
|
241 |
+
assert disp.figure_ == fig2
|
242 |
+
|
243 |
+
|
244 |
+
@pytest.mark.parametrize("response_method", ["auto", "predict", "decision_function"])
|
245 |
+
@pytest.mark.parametrize("plot_method", ["contourf", "contour"])
|
246 |
+
def test_decision_boundary_display_outlier_detector(
|
247 |
+
pyplot, response_method, plot_method
|
248 |
+
):
|
249 |
+
"""Check that decision boundary is correct for outlier detector."""
|
250 |
+
fig, ax = pyplot.subplots()
|
251 |
+
eps = 2.0
|
252 |
+
outlier_detector = IsolationForest(random_state=0).fit(X, y)
|
253 |
+
disp = DecisionBoundaryDisplay.from_estimator(
|
254 |
+
outlier_detector,
|
255 |
+
X,
|
256 |
+
grid_resolution=5,
|
257 |
+
response_method=response_method,
|
258 |
+
plot_method=plot_method,
|
259 |
+
eps=eps,
|
260 |
+
ax=ax,
|
261 |
+
)
|
262 |
+
assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet)
|
263 |
+
assert disp.ax_ == ax
|
264 |
+
assert disp.figure_ == fig
|
265 |
+
|
266 |
+
x0, x1 = X[:, 0], X[:, 1]
|
267 |
+
|
268 |
+
x0_min, x0_max = x0.min() - eps, x0.max() + eps
|
269 |
+
x1_min, x1_max = x1.min() - eps, x1.max() + eps
|
270 |
+
|
271 |
+
assert disp.xx0.min() == pytest.approx(x0_min)
|
272 |
+
assert disp.xx0.max() == pytest.approx(x0_max)
|
273 |
+
assert disp.xx1.min() == pytest.approx(x1_min)
|
274 |
+
assert disp.xx1.max() == pytest.approx(x1_max)
|
275 |
+
|
276 |
+
|
277 |
+
@pytest.mark.parametrize("response_method", ["auto", "predict"])
|
278 |
+
@pytest.mark.parametrize("plot_method", ["contourf", "contour"])
|
279 |
+
def test_decision_boundary_display_regressor(pyplot, response_method, plot_method):
|
280 |
+
"""Check that we can display the decision boundary for a regressor."""
|
281 |
+
X, y = load_diabetes(return_X_y=True)
|
282 |
+
X = X[:, :2]
|
283 |
+
tree = DecisionTreeRegressor().fit(X, y)
|
284 |
+
fig, ax = pyplot.subplots()
|
285 |
+
eps = 2.0
|
286 |
+
disp = DecisionBoundaryDisplay.from_estimator(
|
287 |
+
tree,
|
288 |
+
X,
|
289 |
+
response_method=response_method,
|
290 |
+
ax=ax,
|
291 |
+
eps=eps,
|
292 |
+
plot_method=plot_method,
|
293 |
+
)
|
294 |
+
assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet)
|
295 |
+
assert disp.ax_ == ax
|
296 |
+
assert disp.figure_ == fig
|
297 |
+
|
298 |
+
x0, x1 = X[:, 0], X[:, 1]
|
299 |
+
|
300 |
+
x0_min, x0_max = x0.min() - eps, x0.max() + eps
|
301 |
+
x1_min, x1_max = x1.min() - eps, x1.max() + eps
|
302 |
+
|
303 |
+
assert disp.xx0.min() == pytest.approx(x0_min)
|
304 |
+
assert disp.xx0.max() == pytest.approx(x0_max)
|
305 |
+
assert disp.xx1.min() == pytest.approx(x1_min)
|
306 |
+
assert disp.xx1.max() == pytest.approx(x1_max)
|
307 |
+
|
308 |
+
fig2, ax2 = pyplot.subplots()
|
309 |
+
# change plotting method for second plot
|
310 |
+
disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto")
|
311 |
+
assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh)
|
312 |
+
assert disp.ax_ == ax2
|
313 |
+
assert disp.figure_ == fig2
|
314 |
+
|
315 |
+
|
316 |
+
@pytest.mark.parametrize(
|
317 |
+
"response_method, msg",
|
318 |
+
[
|
319 |
+
(
|
320 |
+
"predict_proba",
|
321 |
+
"MyClassifier has none of the following attributes: predict_proba",
|
322 |
+
),
|
323 |
+
(
|
324 |
+
"decision_function",
|
325 |
+
"MyClassifier has none of the following attributes: decision_function",
|
326 |
+
),
|
327 |
+
(
|
328 |
+
"auto",
|
329 |
+
(
|
330 |
+
"MyClassifier has none of the following attributes: decision_function, "
|
331 |
+
"predict_proba, predict"
|
332 |
+
),
|
333 |
+
),
|
334 |
+
(
|
335 |
+
"bad_method",
|
336 |
+
"MyClassifier has none of the following attributes: bad_method",
|
337 |
+
),
|
338 |
+
],
|
339 |
+
)
|
340 |
+
def test_error_bad_response(pyplot, response_method, msg):
|
341 |
+
"""Check errors for bad response."""
|
342 |
+
|
343 |
+
class MyClassifier(BaseEstimator, ClassifierMixin):
|
344 |
+
def fit(self, X, y):
|
345 |
+
self.fitted_ = True
|
346 |
+
self.classes_ = [0, 1]
|
347 |
+
return self
|
348 |
+
|
349 |
+
clf = MyClassifier().fit(X, y)
|
350 |
+
|
351 |
+
with pytest.raises(AttributeError, match=msg):
|
352 |
+
DecisionBoundaryDisplay.from_estimator(clf, X, response_method=response_method)
|
353 |
+
|
354 |
+
|
355 |
+
@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"])
|
356 |
+
def test_multilabel_classifier_error(pyplot, response_method):
|
357 |
+
"""Check that multilabel classifier raises correct error."""
|
358 |
+
X, y = make_multilabel_classification(random_state=0)
|
359 |
+
X = X[:, :2]
|
360 |
+
tree = DecisionTreeClassifier().fit(X, y)
|
361 |
+
|
362 |
+
msg = "Multi-label and multi-output multi-class classifiers are not supported"
|
363 |
+
with pytest.raises(ValueError, match=msg):
|
364 |
+
DecisionBoundaryDisplay.from_estimator(
|
365 |
+
tree,
|
366 |
+
X,
|
367 |
+
response_method=response_method,
|
368 |
+
)
|
369 |
+
|
370 |
+
|
371 |
+
@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"])
|
372 |
+
def test_multi_output_multi_class_classifier_error(pyplot, response_method):
|
373 |
+
"""Check that multi-output multi-class classifier raises correct error."""
|
374 |
+
X = np.asarray([[0, 1], [1, 2]])
|
375 |
+
y = np.asarray([["tree", "cat"], ["cat", "tree"]])
|
376 |
+
tree = DecisionTreeClassifier().fit(X, y)
|
377 |
+
|
378 |
+
msg = "Multi-label and multi-output multi-class classifiers are not supported"
|
379 |
+
with pytest.raises(ValueError, match=msg):
|
380 |
+
DecisionBoundaryDisplay.from_estimator(
|
381 |
+
tree,
|
382 |
+
X,
|
383 |
+
response_method=response_method,
|
384 |
+
)
|
385 |
+
|
386 |
+
|
387 |
+
def test_multioutput_regressor_error(pyplot):
|
388 |
+
"""Check that multioutput regressor raises correct error."""
|
389 |
+
X = np.asarray([[0, 1], [1, 2]])
|
390 |
+
y = np.asarray([[0, 1], [4, 1]])
|
391 |
+
tree = DecisionTreeRegressor().fit(X, y)
|
392 |
+
with pytest.raises(ValueError, match="Multi-output regressors are not supported"):
|
393 |
+
DecisionBoundaryDisplay.from_estimator(tree, X, response_method="predict")
|
394 |
+
|
395 |
+
|
396 |
+
@pytest.mark.parametrize(
|
397 |
+
"response_method",
|
398 |
+
["predict_proba", "decision_function", ["predict_proba", "predict"]],
|
399 |
+
)
|
400 |
+
def test_regressor_unsupported_response(pyplot, response_method):
|
401 |
+
"""Check that we can display the decision boundary for a regressor."""
|
402 |
+
X, y = load_diabetes(return_X_y=True)
|
403 |
+
X = X[:, :2]
|
404 |
+
tree = DecisionTreeRegressor().fit(X, y)
|
405 |
+
err_msg = "should either be a classifier to be used with response_method"
|
406 |
+
with pytest.raises(ValueError, match=err_msg):
|
407 |
+
DecisionBoundaryDisplay.from_estimator(tree, X, response_method=response_method)
|
408 |
+
|
409 |
+
|
410 |
+
@pytest.mark.filterwarnings(
|
411 |
+
# We expect to raise the following warning because the classifier is fit on a
|
412 |
+
# NumPy array
|
413 |
+
"ignore:X has feature names, but LogisticRegression was fitted without"
|
414 |
+
)
|
415 |
+
def test_dataframe_labels_used(pyplot, fitted_clf):
|
416 |
+
"""Check that column names are used for pandas."""
|
417 |
+
pd = pytest.importorskip("pandas")
|
418 |
+
df = pd.DataFrame(X, columns=["col_x", "col_y"])
|
419 |
+
|
420 |
+
# pandas column names are used by default
|
421 |
+
_, ax = pyplot.subplots()
|
422 |
+
disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, df, ax=ax)
|
423 |
+
assert ax.get_xlabel() == "col_x"
|
424 |
+
assert ax.get_ylabel() == "col_y"
|
425 |
+
|
426 |
+
# second call to plot will have the names
|
427 |
+
fig, ax = pyplot.subplots()
|
428 |
+
disp.plot(ax=ax)
|
429 |
+
assert ax.get_xlabel() == "col_x"
|
430 |
+
assert ax.get_ylabel() == "col_y"
|
431 |
+
|
432 |
+
# axes with a label will not get overridden
|
433 |
+
fig, ax = pyplot.subplots()
|
434 |
+
ax.set(xlabel="hello", ylabel="world")
|
435 |
+
disp.plot(ax=ax)
|
436 |
+
assert ax.get_xlabel() == "hello"
|
437 |
+
assert ax.get_ylabel() == "world"
|
438 |
+
|
439 |
+
# labels get overridden only if provided to the `plot` method
|
440 |
+
disp.plot(ax=ax, xlabel="overwritten_x", ylabel="overwritten_y")
|
441 |
+
assert ax.get_xlabel() == "overwritten_x"
|
442 |
+
assert ax.get_ylabel() == "overwritten_y"
|
443 |
+
|
444 |
+
# labels do not get inferred if provided to `from_estimator`
|
445 |
+
_, ax = pyplot.subplots()
|
446 |
+
disp = DecisionBoundaryDisplay.from_estimator(
|
447 |
+
fitted_clf, df, ax=ax, xlabel="overwritten_x", ylabel="overwritten_y"
|
448 |
+
)
|
449 |
+
assert ax.get_xlabel() == "overwritten_x"
|
450 |
+
assert ax.get_ylabel() == "overwritten_y"
|
451 |
+
|
452 |
+
|
453 |
+
def test_string_target(pyplot):
|
454 |
+
"""Check that decision boundary works with classifiers trained on string labels."""
|
455 |
+
iris = load_iris()
|
456 |
+
X = iris.data[:, [0, 1]]
|
457 |
+
|
458 |
+
# Use strings as target
|
459 |
+
y = iris.target_names[iris.target]
|
460 |
+
log_reg = LogisticRegression().fit(X, y)
|
461 |
+
|
462 |
+
# Does not raise
|
463 |
+
DecisionBoundaryDisplay.from_estimator(
|
464 |
+
log_reg,
|
465 |
+
X,
|
466 |
+
grid_resolution=5,
|
467 |
+
response_method="predict",
|
468 |
+
)
|
469 |
+
|
470 |
+
|
471 |
+
def test_dataframe_support(pyplot):
|
472 |
+
"""Check that passing a dataframe at fit and to the Display does not
|
473 |
+
raise warnings.
|
474 |
+
|
475 |
+
Non-regression test for:
|
476 |
+
https://github.com/scikit-learn/scikit-learn/issues/23311
|
477 |
+
"""
|
478 |
+
pd = pytest.importorskip("pandas")
|
479 |
+
df = pd.DataFrame(X, columns=["col_x", "col_y"])
|
480 |
+
estimator = LogisticRegression().fit(df, y)
|
481 |
+
|
482 |
+
with warnings.catch_warnings():
|
    # no warnings linked to feature names validation should be raised
    warnings.simplefilter("error", UserWarning)
    DecisionBoundaryDisplay.from_estimator(estimator, df, response_method="predict")


@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_class_of_interest_binary(pyplot, response_method):
    """Check the behaviour of passing `class_of_interest` for plotting the output of
    `predict_proba` and `decision_function` in the binary case.
    """
    iris = load_iris()
    X = iris.data[:100, :2]
    y = iris.target[:100]
    assert_array_equal(np.unique(y), [0, 1])

    estimator = LogisticRegression().fit(X, y)
    # We will check that `class_of_interest=None` is equivalent to
    # `class_of_interest=estimator.classes_[1]`
    disp_default = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=None,
    )
    disp_class_1 = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=estimator.classes_[1],
    )

    assert_allclose(disp_default.response, disp_class_1.response)

    # we can check that `_get_response_values` modifies the response when targeting
    # the other class, i.e. 1 - p(y=1|x) for `predict_proba` and -decision_function
    # for `decision_function`.
    disp_class_0 = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=estimator.classes_[0],
    )

    if response_method == "predict_proba":
        assert_allclose(disp_default.response, 1 - disp_class_0.response)
    else:
        assert response_method == "decision_function"
        assert_allclose(disp_default.response, -disp_class_0.response)


@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_class_of_interest_multiclass(pyplot, response_method):
    """Check the behaviour of passing `class_of_interest` for plotting the output of
    `predict_proba` and `decision_function` in the multiclass case.
    """
    iris = load_iris()
    X = iris.data[:, :2]
    y = iris.target  # the target are numerical labels
    class_of_interest_idx = 2

    estimator = LogisticRegression().fit(X, y)
    disp = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=class_of_interest_idx,
    )

    # we will check that we plot the expected values as response
    grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1)
    response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx]
    assert_allclose(response.reshape(*disp.response.shape), disp.response)

    # make the same test but this time using target as strings
    y = iris.target_names[iris.target]
    estimator = LogisticRegression().fit(X, y)

    disp = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=iris.target_names[class_of_interest_idx],
    )

    grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1)
    response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx]
    assert_allclose(response.reshape(*disp.response.shape), disp.response)

    # check that we raise an error for unknown labels
    # this test should already be handled in `_get_response_values` but we can have this
    # test here as well
    err_msg = "class_of_interest=2 is not a valid label: It should be one of"
    with pytest.raises(ValueError, match=err_msg):
        DecisionBoundaryDisplay.from_estimator(
            estimator,
            X,
            response_method=response_method,
            class_of_interest=class_of_interest_idx,
        )

    # TODO: remove this test when we handle multiclass with class_of_interest=None
    # by showing the max of the decision function or the max of the predicted
    # probabilities.
    err_msg = "Multiclass classifiers are only supported"
    with pytest.raises(ValueError, match=err_msg):
        DecisionBoundaryDisplay.from_estimator(
            estimator,
            X,
            response_method=response_method,
            class_of_interest=None,
        )


def test_subclass_named_constructors_return_type_is_subclass(pyplot):
    """Check that named constructors return the correct type when subclassed.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/27675
    """
    clf = LogisticRegression().fit(X, y)

    class SubclassOfDisplay(DecisionBoundaryDisplay):
        pass

    curve = SubclassOfDisplay.from_estimator(estimator=clf, X=X)

    assert isinstance(curve, SubclassOfDisplay)
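
For reference, the binary `class_of_interest` behaviour exercised by the tests above boils down to the following minimal standalone sketch. This is an editorial example, not part of the vendored test file; it assumes matplotlib is installed and a scikit-learn version recent enough for `DecisionBoundaryDisplay.from_estimator` to accept `class_of_interest`.

# Minimal sketch (editorial, not part of the vendored file): default class of
# interest is classes_[1]; targeting classes_[0] flips the probability surface.
import matplotlib
matplotlib.use("Agg")  # render off-screen

import numpy as np
from sklearn.datasets import load_iris
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X, y = iris.data[:100, :2], iris.target[:100]  # binary subset: classes 0 and 1
clf = LogisticRegression().fit(X, y)

# `class_of_interest=None` plots the response for clf.classes_[1] by default.
disp_default = DecisionBoundaryDisplay.from_estimator(
    clf, X, response_method="predict_proba", class_of_interest=None
)
disp_class_0 = DecisionBoundaryDisplay.from_estimator(
    clf, X, response_method="predict_proba", class_of_interest=clf.classes_[0]
)
# For predict_proba the two grids are complementary: p(y=1|x) == 1 - p(y=0|x).
np.testing.assert_allclose(disp_default.response, 1 - disp_class_0.response)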
venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py
ADDED
@@ -0,0 +1,1140 @@
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.stats.mstats import mquantiles

from sklearn.compose import make_column_transformer
from sklearn.datasets import (
    load_diabetes,
    load_iris,
    make_classification,
    make_regression,
)
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._testing import _convert_container

# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved
pytestmark = pytest.mark.filterwarnings(
    (
        "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:"
        "matplotlib.*"
    ),
)


@pytest.fixture(scope="module")
def diabetes():
    # diabetes dataset, subsampled for speed
    data = load_diabetes()
    data.data = data.data[:50]
    data.target = data.target[:50]
    return data


@pytest.fixture(scope="module")
def clf_diabetes(diabetes):
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(diabetes.data, diabetes.target)
    return clf


@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize("grid_resolution", [10, 20])
def test_plot_partial_dependence(grid_resolution, pyplot, clf_diabetes, diabetes):
    # Test partial dependence plot function.
    # Use columns 0 & 2 as 1 is not quantitative (sex)
    feature_names = diabetes.feature_names
    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        [0, 2, (0, 2)],
        grid_resolution=grid_resolution,
        feature_names=feature_names,
        contour_kw={"cmap": "jet"},
    )
    fig = pyplot.gcf()
    axs = fig.get_axes()
    assert disp.figure_ is fig
    assert len(axs) == 4

    assert disp.bounding_ax_ is not None
    assert disp.axes_.shape == (1, 3)
    assert disp.lines_.shape == (1, 3)
    assert disp.contours_.shape == (1, 3)
    assert disp.deciles_vlines_.shape == (1, 3)
    assert disp.deciles_hlines_.shape == (1, 3)

    assert disp.lines_[0, 2] is None
    assert disp.contours_[0, 0] is None
    assert disp.contours_[0, 1] is None

    # deciles lines: always show on xaxis, only show on yaxis if 2-way PDP
    for i in range(3):
        assert disp.deciles_vlines_[0, i] is not None
    assert disp.deciles_hlines_[0, 0] is None
    assert disp.deciles_hlines_[0, 1] is None
    assert disp.deciles_hlines_[0, 2] is not None

    assert disp.features == [(0,), (2,), (0, 2)]
    assert np.all(disp.feature_names == feature_names)
    assert len(disp.deciles) == 2
    for i in [0, 2]:
        assert_allclose(
            disp.deciles[i],
            mquantiles(diabetes.data[:, i], prob=np.arange(0.1, 1.0, 0.1)),
        )

    single_feature_positions = [(0, (0, 0)), (2, (0, 1))]
    expected_ylabels = ["Partial dependence", ""]

    for i, (feat_col, pos) in enumerate(single_feature_positions):
        ax = disp.axes_[pos]
        assert ax.get_ylabel() == expected_ylabels[i]
        assert ax.get_xlabel() == diabetes.feature_names[feat_col]

        line = disp.lines_[pos]

        avg_preds = disp.pd_results[i]
        assert avg_preds.average.shape == (1, grid_resolution)
        target_idx = disp.target_idx

        line_data = line.get_data()
        assert_allclose(line_data[0], avg_preds["grid_values"][0])
        assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())

    # two feature position
    ax = disp.axes_[0, 2]
    coutour = disp.contours_[0, 2]
    assert coutour.get_cmap().name == "jet"
    assert ax.get_xlabel() == diabetes.feature_names[0]
    assert ax.get_ylabel() == diabetes.feature_names[2]

117 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
118 |
+
@pytest.mark.parametrize(
|
119 |
+
"kind, centered, subsample, shape",
|
120 |
+
[
|
121 |
+
("average", False, None, (1, 3)),
|
122 |
+
("individual", False, None, (1, 3, 50)),
|
123 |
+
("both", False, None, (1, 3, 51)),
|
124 |
+
("individual", False, 20, (1, 3, 20)),
|
125 |
+
("both", False, 20, (1, 3, 21)),
|
126 |
+
("individual", False, 0.5, (1, 3, 25)),
|
127 |
+
("both", False, 0.5, (1, 3, 26)),
|
128 |
+
("average", True, None, (1, 3)),
|
129 |
+
("individual", True, None, (1, 3, 50)),
|
130 |
+
("both", True, None, (1, 3, 51)),
|
131 |
+
("individual", True, 20, (1, 3, 20)),
|
132 |
+
("both", True, 20, (1, 3, 21)),
|
133 |
+
],
|
134 |
+
)
|
135 |
+
def test_plot_partial_dependence_kind(
|
136 |
+
pyplot,
|
137 |
+
kind,
|
138 |
+
centered,
|
139 |
+
subsample,
|
140 |
+
shape,
|
141 |
+
clf_diabetes,
|
142 |
+
diabetes,
|
143 |
+
):
|
144 |
+
disp = PartialDependenceDisplay.from_estimator(
|
145 |
+
clf_diabetes,
|
146 |
+
diabetes.data,
|
147 |
+
[0, 1, 2],
|
148 |
+
kind=kind,
|
149 |
+
centered=centered,
|
150 |
+
subsample=subsample,
|
151 |
+
)
|
152 |
+
|
153 |
+
assert disp.axes_.shape == (1, 3)
|
154 |
+
assert disp.lines_.shape == shape
|
155 |
+
assert disp.contours_.shape == (1, 3)
|
156 |
+
|
157 |
+
assert disp.contours_[0, 0] is None
|
158 |
+
assert disp.contours_[0, 1] is None
|
159 |
+
assert disp.contours_[0, 2] is None
|
160 |
+
|
161 |
+
if centered:
|
162 |
+
assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None])
|
163 |
+
else:
|
164 |
+
assert all([ln._y[0] != 0.0 for ln in disp.lines_.ravel() if ln is not None])
|
165 |
+
|
166 |
+
|
167 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
168 |
+
@pytest.mark.parametrize(
|
169 |
+
"input_type, feature_names_type",
|
170 |
+
[
|
171 |
+
("dataframe", None),
|
172 |
+
("dataframe", "list"),
|
173 |
+
("list", "list"),
|
174 |
+
("array", "list"),
|
175 |
+
("dataframe", "array"),
|
176 |
+
("list", "array"),
|
177 |
+
("array", "array"),
|
178 |
+
("dataframe", "series"),
|
179 |
+
("list", "series"),
|
180 |
+
("array", "series"),
|
181 |
+
("dataframe", "index"),
|
182 |
+
("list", "index"),
|
183 |
+
("array", "index"),
|
184 |
+
],
|
185 |
+
)
|
186 |
+
def test_plot_partial_dependence_str_features(
|
187 |
+
pyplot,
|
188 |
+
clf_diabetes,
|
189 |
+
diabetes,
|
190 |
+
input_type,
|
191 |
+
feature_names_type,
|
192 |
+
):
|
193 |
+
if input_type == "dataframe":
|
194 |
+
pd = pytest.importorskip("pandas")
|
195 |
+
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
|
196 |
+
elif input_type == "list":
|
197 |
+
X = diabetes.data.tolist()
|
198 |
+
else:
|
199 |
+
X = diabetes.data
|
200 |
+
|
201 |
+
if feature_names_type is None:
|
202 |
+
feature_names = None
|
203 |
+
else:
|
204 |
+
feature_names = _convert_container(diabetes.feature_names, feature_names_type)
|
205 |
+
|
206 |
+
grid_resolution = 25
|
207 |
+
# check with str features and array feature names and single column
|
208 |
+
disp = PartialDependenceDisplay.from_estimator(
|
209 |
+
clf_diabetes,
|
210 |
+
X,
|
211 |
+
[("age", "bmi"), "bmi"],
|
212 |
+
grid_resolution=grid_resolution,
|
213 |
+
feature_names=feature_names,
|
214 |
+
n_cols=1,
|
215 |
+
line_kw={"alpha": 0.8},
|
216 |
+
)
|
217 |
+
fig = pyplot.gcf()
|
218 |
+
axs = fig.get_axes()
|
219 |
+
assert len(axs) == 3
|
220 |
+
|
221 |
+
assert disp.figure_ is fig
|
222 |
+
assert disp.axes_.shape == (2, 1)
|
223 |
+
assert disp.lines_.shape == (2, 1)
|
224 |
+
assert disp.contours_.shape == (2, 1)
|
225 |
+
assert disp.deciles_vlines_.shape == (2, 1)
|
226 |
+
assert disp.deciles_hlines_.shape == (2, 1)
|
227 |
+
|
228 |
+
assert disp.lines_[0, 0] is None
|
229 |
+
assert disp.deciles_vlines_[0, 0] is not None
|
230 |
+
assert disp.deciles_hlines_[0, 0] is not None
|
231 |
+
assert disp.contours_[1, 0] is None
|
232 |
+
assert disp.deciles_hlines_[1, 0] is None
|
233 |
+
assert disp.deciles_vlines_[1, 0] is not None
|
234 |
+
|
235 |
+
# line
|
236 |
+
ax = disp.axes_[1, 0]
|
237 |
+
assert ax.get_xlabel() == "bmi"
|
238 |
+
assert ax.get_ylabel() == "Partial dependence"
|
239 |
+
|
240 |
+
line = disp.lines_[1, 0]
|
241 |
+
avg_preds = disp.pd_results[1]
|
242 |
+
target_idx = disp.target_idx
|
243 |
+
assert line.get_alpha() == 0.8
|
244 |
+
|
245 |
+
line_data = line.get_data()
|
246 |
+
assert_allclose(line_data[0], avg_preds["grid_values"][0])
|
247 |
+
assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())
|
248 |
+
|
249 |
+
# contour
|
250 |
+
ax = disp.axes_[0, 0]
|
251 |
+
assert ax.get_xlabel() == "age"
|
252 |
+
assert ax.get_ylabel() == "bmi"
|
253 |
+
|
254 |
+
|
255 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
256 |
+
def test_plot_partial_dependence_custom_axes(pyplot, clf_diabetes, diabetes):
|
257 |
+
grid_resolution = 25
|
258 |
+
fig, (ax1, ax2) = pyplot.subplots(1, 2)
|
259 |
+
disp = PartialDependenceDisplay.from_estimator(
|
260 |
+
clf_diabetes,
|
261 |
+
diabetes.data,
|
262 |
+
["age", ("age", "bmi")],
|
263 |
+
grid_resolution=grid_resolution,
|
264 |
+
feature_names=diabetes.feature_names,
|
265 |
+
ax=[ax1, ax2],
|
266 |
+
)
|
267 |
+
assert fig is disp.figure_
|
268 |
+
assert disp.bounding_ax_ is None
|
269 |
+
assert disp.axes_.shape == (2,)
|
270 |
+
assert disp.axes_[0] is ax1
|
271 |
+
assert disp.axes_[1] is ax2
|
272 |
+
|
273 |
+
ax = disp.axes_[0]
|
274 |
+
assert ax.get_xlabel() == "age"
|
275 |
+
assert ax.get_ylabel() == "Partial dependence"
|
276 |
+
|
277 |
+
line = disp.lines_[0]
|
278 |
+
avg_preds = disp.pd_results[0]
|
279 |
+
target_idx = disp.target_idx
|
280 |
+
|
281 |
+
line_data = line.get_data()
|
282 |
+
assert_allclose(line_data[0], avg_preds["grid_values"][0])
|
283 |
+
assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())
|
284 |
+
|
285 |
+
# contour
|
286 |
+
ax = disp.axes_[1]
|
287 |
+
assert ax.get_xlabel() == "age"
|
288 |
+
assert ax.get_ylabel() == "bmi"
|
289 |
+
|
290 |
+
|
291 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
292 |
+
@pytest.mark.parametrize(
|
293 |
+
"kind, lines", [("average", 1), ("individual", 50), ("both", 51)]
|
294 |
+
)
|
295 |
+
def test_plot_partial_dependence_passing_numpy_axes(
|
296 |
+
pyplot, clf_diabetes, diabetes, kind, lines
|
297 |
+
):
|
298 |
+
grid_resolution = 25
|
299 |
+
feature_names = diabetes.feature_names
|
300 |
+
disp1 = PartialDependenceDisplay.from_estimator(
|
301 |
+
clf_diabetes,
|
302 |
+
diabetes.data,
|
303 |
+
["age", "bmi"],
|
304 |
+
kind=kind,
|
305 |
+
grid_resolution=grid_resolution,
|
306 |
+
feature_names=feature_names,
|
307 |
+
)
|
308 |
+
assert disp1.axes_.shape == (1, 2)
|
309 |
+
assert disp1.axes_[0, 0].get_ylabel() == "Partial dependence"
|
310 |
+
assert disp1.axes_[0, 1].get_ylabel() == ""
|
311 |
+
assert len(disp1.axes_[0, 0].get_lines()) == lines
|
312 |
+
assert len(disp1.axes_[0, 1].get_lines()) == lines
|
313 |
+
|
314 |
+
lr = LinearRegression()
|
315 |
+
lr.fit(diabetes.data, diabetes.target)
|
316 |
+
|
317 |
+
disp2 = PartialDependenceDisplay.from_estimator(
|
318 |
+
lr,
|
319 |
+
diabetes.data,
|
320 |
+
["age", "bmi"],
|
321 |
+
kind=kind,
|
322 |
+
grid_resolution=grid_resolution,
|
323 |
+
feature_names=feature_names,
|
324 |
+
ax=disp1.axes_,
|
325 |
+
)
|
326 |
+
|
327 |
+
assert np.all(disp1.axes_ == disp2.axes_)
|
328 |
+
assert len(disp2.axes_[0, 0].get_lines()) == 2 * lines
|
329 |
+
assert len(disp2.axes_[0, 1].get_lines()) == 2 * lines
|
330 |
+
|
331 |
+
|
332 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
333 |
+
@pytest.mark.parametrize("nrows, ncols", [(2, 2), (3, 1)])
|
334 |
+
def test_plot_partial_dependence_incorrent_num_axes(
|
335 |
+
pyplot, clf_diabetes, diabetes, nrows, ncols
|
336 |
+
):
|
337 |
+
grid_resolution = 5
|
338 |
+
fig, axes = pyplot.subplots(nrows, ncols)
|
339 |
+
axes_formats = [list(axes.ravel()), tuple(axes.ravel()), axes]
|
340 |
+
|
341 |
+
msg = "Expected ax to have 2 axes, got {}".format(nrows * ncols)
|
342 |
+
|
343 |
+
disp = PartialDependenceDisplay.from_estimator(
|
344 |
+
clf_diabetes,
|
345 |
+
diabetes.data,
|
346 |
+
["age", "bmi"],
|
347 |
+
grid_resolution=grid_resolution,
|
348 |
+
feature_names=diabetes.feature_names,
|
349 |
+
)
|
350 |
+
|
351 |
+
for ax_format in axes_formats:
|
352 |
+
with pytest.raises(ValueError, match=msg):
|
353 |
+
PartialDependenceDisplay.from_estimator(
|
354 |
+
clf_diabetes,
|
355 |
+
diabetes.data,
|
356 |
+
["age", "bmi"],
|
357 |
+
grid_resolution=grid_resolution,
|
358 |
+
feature_names=diabetes.feature_names,
|
359 |
+
ax=ax_format,
|
360 |
+
)
|
361 |
+
|
362 |
+
# with axes object
|
363 |
+
with pytest.raises(ValueError, match=msg):
|
364 |
+
disp.plot(ax=ax_format)
|
365 |
+
|
366 |
+
|
367 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
368 |
+
def test_plot_partial_dependence_with_same_axes(pyplot, clf_diabetes, diabetes):
|
369 |
+
# The first call to plot_partial_dependence will create two new axes to
|
370 |
+
# place in the space of the passed in axes, which results in a total of
|
371 |
+
# three axes in the figure.
|
372 |
+
# Currently the API does not allow for the second call to
|
373 |
+
# plot_partial_dependence to use the same axes again, because it will
|
374 |
+
# create two new axes in the space resulting in five axes. To get the
|
375 |
+
# expected behavior one needs to pass the generated axes into the second
|
376 |
+
# call:
|
377 |
+
# disp1 = plot_partial_dependence(...)
|
378 |
+
# disp2 = plot_partial_dependence(..., ax=disp1.axes_)
|
379 |
+
|
380 |
+
grid_resolution = 25
|
381 |
+
fig, ax = pyplot.subplots()
|
382 |
+
PartialDependenceDisplay.from_estimator(
|
383 |
+
clf_diabetes,
|
384 |
+
diabetes.data,
|
385 |
+
["age", "bmi"],
|
386 |
+
grid_resolution=grid_resolution,
|
387 |
+
feature_names=diabetes.feature_names,
|
388 |
+
ax=ax,
|
389 |
+
)
|
390 |
+
|
391 |
+
msg = (
|
392 |
+
"The ax was already used in another plot function, please set "
|
393 |
+
"ax=display.axes_ instead"
|
394 |
+
)
|
395 |
+
|
396 |
+
with pytest.raises(ValueError, match=msg):
|
397 |
+
PartialDependenceDisplay.from_estimator(
|
398 |
+
clf_diabetes,
|
399 |
+
diabetes.data,
|
400 |
+
["age", "bmi"],
|
401 |
+
grid_resolution=grid_resolution,
|
402 |
+
feature_names=diabetes.feature_names,
|
403 |
+
ax=ax,
|
404 |
+
)
|
405 |
+
|
406 |
+
|
407 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
408 |
+
def test_plot_partial_dependence_feature_name_reuse(pyplot, clf_diabetes, diabetes):
|
409 |
+
# second call to plot does not change the feature names from the first
|
410 |
+
# call
|
411 |
+
|
412 |
+
feature_names = diabetes.feature_names
|
413 |
+
disp = PartialDependenceDisplay.from_estimator(
|
414 |
+
clf_diabetes,
|
415 |
+
diabetes.data,
|
416 |
+
[0, 1],
|
417 |
+
grid_resolution=10,
|
418 |
+
feature_names=feature_names,
|
419 |
+
)
|
420 |
+
|
421 |
+
PartialDependenceDisplay.from_estimator(
|
422 |
+
clf_diabetes, diabetes.data, [0, 1], grid_resolution=10, ax=disp.axes_
|
423 |
+
)
|
424 |
+
|
425 |
+
for i, ax in enumerate(disp.axes_.ravel()):
|
426 |
+
assert ax.get_xlabel() == feature_names[i]
|
427 |
+
|
428 |
+
|
429 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
430 |
+
def test_plot_partial_dependence_multiclass(pyplot):
|
431 |
+
grid_resolution = 25
|
432 |
+
clf_int = GradientBoostingClassifier(n_estimators=10, random_state=1)
|
433 |
+
iris = load_iris()
|
434 |
+
|
435 |
+
# Test partial dependence plot function on multi-class input.
|
436 |
+
clf_int.fit(iris.data, iris.target)
|
437 |
+
disp_target_0 = PartialDependenceDisplay.from_estimator(
|
438 |
+
clf_int, iris.data, [0, 3], target=0, grid_resolution=grid_resolution
|
439 |
+
)
|
440 |
+
assert disp_target_0.figure_ is pyplot.gcf()
|
441 |
+
assert disp_target_0.axes_.shape == (1, 2)
|
442 |
+
assert disp_target_0.lines_.shape == (1, 2)
|
443 |
+
assert disp_target_0.contours_.shape == (1, 2)
|
444 |
+
assert disp_target_0.deciles_vlines_.shape == (1, 2)
|
445 |
+
assert disp_target_0.deciles_hlines_.shape == (1, 2)
|
446 |
+
assert all(c is None for c in disp_target_0.contours_.flat)
|
447 |
+
assert disp_target_0.target_idx == 0
|
448 |
+
|
449 |
+
# now with symbol labels
|
450 |
+
target = iris.target_names[iris.target]
|
451 |
+
clf_symbol = GradientBoostingClassifier(n_estimators=10, random_state=1)
|
452 |
+
clf_symbol.fit(iris.data, target)
|
453 |
+
disp_symbol = PartialDependenceDisplay.from_estimator(
|
454 |
+
clf_symbol, iris.data, [0, 3], target="setosa", grid_resolution=grid_resolution
|
455 |
+
)
|
456 |
+
assert disp_symbol.figure_ is pyplot.gcf()
|
457 |
+
assert disp_symbol.axes_.shape == (1, 2)
|
458 |
+
assert disp_symbol.lines_.shape == (1, 2)
|
459 |
+
assert disp_symbol.contours_.shape == (1, 2)
|
460 |
+
assert disp_symbol.deciles_vlines_.shape == (1, 2)
|
461 |
+
assert disp_symbol.deciles_hlines_.shape == (1, 2)
|
462 |
+
assert all(c is None for c in disp_symbol.contours_.flat)
|
463 |
+
assert disp_symbol.target_idx == 0
|
464 |
+
|
465 |
+
for int_result, symbol_result in zip(
|
466 |
+
disp_target_0.pd_results, disp_symbol.pd_results
|
467 |
+
):
|
468 |
+
assert_allclose(int_result.average, symbol_result.average)
|
469 |
+
assert_allclose(int_result["grid_values"], symbol_result["grid_values"])
|
470 |
+
|
471 |
+
# check that the pd plots are different for another target
|
472 |
+
disp_target_1 = PartialDependenceDisplay.from_estimator(
|
473 |
+
clf_int, iris.data, [0, 3], target=1, grid_resolution=grid_resolution
|
474 |
+
)
|
475 |
+
target_0_data_y = disp_target_0.lines_[0, 0].get_data()[1]
|
476 |
+
target_1_data_y = disp_target_1.lines_[0, 0].get_data()[1]
|
477 |
+
assert any(target_0_data_y != target_1_data_y)
|
478 |
+
|
479 |
+
|
480 |
+
multioutput_regression_data = make_regression(n_samples=50, n_targets=2, random_state=0)
|
481 |
+
|
482 |
+
|
483 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
484 |
+
@pytest.mark.parametrize("target", [0, 1])
|
485 |
+
def test_plot_partial_dependence_multioutput(pyplot, target):
|
486 |
+
# Test partial dependence plot function on multi-output input.
|
487 |
+
X, y = multioutput_regression_data
|
488 |
+
clf = LinearRegression().fit(X, y)
|
489 |
+
|
490 |
+
grid_resolution = 25
|
491 |
+
disp = PartialDependenceDisplay.from_estimator(
|
492 |
+
clf, X, [0, 1], target=target, grid_resolution=grid_resolution
|
493 |
+
)
|
494 |
+
fig = pyplot.gcf()
|
495 |
+
axs = fig.get_axes()
|
496 |
+
assert len(axs) == 3
|
497 |
+
assert disp.target_idx == target
|
498 |
+
assert disp.bounding_ax_ is not None
|
499 |
+
|
500 |
+
positions = [(0, 0), (0, 1)]
|
501 |
+
expected_label = ["Partial dependence", ""]
|
502 |
+
|
503 |
+
for i, pos in enumerate(positions):
|
504 |
+
ax = disp.axes_[pos]
|
505 |
+
assert ax.get_ylabel() == expected_label[i]
|
506 |
+
assert ax.get_xlabel() == f"x{i}"
|
507 |
+
|
508 |
+
|
509 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
510 |
+
def test_plot_partial_dependence_dataframe(pyplot, clf_diabetes, diabetes):
|
511 |
+
pd = pytest.importorskip("pandas")
|
512 |
+
df = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
|
513 |
+
|
514 |
+
grid_resolution = 25
|
515 |
+
|
516 |
+
PartialDependenceDisplay.from_estimator(
|
517 |
+
clf_diabetes,
|
518 |
+
df,
|
519 |
+
["bp", "s1"],
|
520 |
+
grid_resolution=grid_resolution,
|
521 |
+
feature_names=df.columns.tolist(),
|
522 |
+
)
|
523 |
+
|
524 |
+
|
525 |
+
dummy_classification_data = make_classification(random_state=0)
|
526 |
+
|
527 |
+
|
528 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
529 |
+
@pytest.mark.parametrize(
|
530 |
+
"data, params, err_msg",
|
531 |
+
[
|
532 |
+
(
|
533 |
+
multioutput_regression_data,
|
534 |
+
{"target": None, "features": [0]},
|
535 |
+
"target must be specified for multi-output",
|
536 |
+
),
|
537 |
+
(
|
538 |
+
multioutput_regression_data,
|
539 |
+
{"target": -1, "features": [0]},
|
540 |
+
r"target must be in \[0, n_tasks\]",
|
541 |
+
),
|
542 |
+
(
|
543 |
+
multioutput_regression_data,
|
544 |
+
{"target": 100, "features": [0]},
|
545 |
+
r"target must be in \[0, n_tasks\]",
|
546 |
+
),
|
547 |
+
(
|
548 |
+
dummy_classification_data,
|
549 |
+
{"features": ["foobar"], "feature_names": None},
|
550 |
+
"Feature 'foobar' not in feature_names",
|
551 |
+
),
|
552 |
+
(
|
553 |
+
dummy_classification_data,
|
554 |
+
{"features": ["foobar"], "feature_names": ["abcd", "def"]},
|
555 |
+
"Feature 'foobar' not in feature_names",
|
556 |
+
),
|
557 |
+
(
|
558 |
+
dummy_classification_data,
|
559 |
+
{"features": [(1, 2, 3)]},
|
560 |
+
"Each entry in features must be either an int, ",
|
561 |
+
),
|
562 |
+
(
|
563 |
+
dummy_classification_data,
|
564 |
+
{"features": [1, {}]},
|
565 |
+
"Each entry in features must be either an int, ",
|
566 |
+
),
|
567 |
+
(
|
568 |
+
dummy_classification_data,
|
569 |
+
{"features": [tuple()]},
|
570 |
+
"Each entry in features must be either an int, ",
|
571 |
+
),
|
572 |
+
(
|
573 |
+
dummy_classification_data,
|
574 |
+
{"features": [123], "feature_names": ["blahblah"]},
|
575 |
+
"All entries of features must be less than ",
|
576 |
+
),
|
577 |
+
(
|
578 |
+
dummy_classification_data,
|
579 |
+
{"features": [0, 1, 2], "feature_names": ["a", "b", "a"]},
|
580 |
+
"feature_names should not contain duplicates",
|
581 |
+
),
|
582 |
+
(
|
583 |
+
dummy_classification_data,
|
584 |
+
{"features": [1, 2], "kind": ["both"]},
|
585 |
+
"When `kind` is provided as a list of strings, it should contain",
|
586 |
+
),
|
587 |
+
(
|
588 |
+
dummy_classification_data,
|
589 |
+
{"features": [1], "subsample": -1},
|
590 |
+
"When an integer, subsample=-1 should be positive.",
|
591 |
+
),
|
592 |
+
(
|
593 |
+
dummy_classification_data,
|
594 |
+
{"features": [1], "subsample": 1.2},
|
595 |
+
r"When a floating-point, subsample=1.2 should be in the \(0, 1\) range",
|
596 |
+
),
|
597 |
+
(
|
598 |
+
dummy_classification_data,
|
599 |
+
{"features": [1, 2], "categorical_features": [1.0, 2.0]},
|
600 |
+
"Expected `categorical_features` to be an array-like of boolean,",
|
601 |
+
),
|
602 |
+
(
|
603 |
+
dummy_classification_data,
|
604 |
+
{"features": [(1, 2)], "categorical_features": [2]},
|
605 |
+
"Two-way partial dependence plots are not supported for pairs",
|
606 |
+
),
|
607 |
+
(
|
608 |
+
dummy_classification_data,
|
609 |
+
{"features": [1], "categorical_features": [1], "kind": "individual"},
|
610 |
+
"It is not possible to display individual effects",
|
611 |
+
),
|
612 |
+
],
|
613 |
+
)
|
614 |
+
def test_plot_partial_dependence_error(pyplot, data, params, err_msg):
|
615 |
+
X, y = data
|
616 |
+
estimator = LinearRegression().fit(X, y)
|
617 |
+
|
618 |
+
with pytest.raises(ValueError, match=err_msg):
|
619 |
+
PartialDependenceDisplay.from_estimator(estimator, X, **params)
|
620 |
+
|
621 |
+
|
622 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
623 |
+
@pytest.mark.parametrize(
|
624 |
+
"params, err_msg",
|
625 |
+
[
|
626 |
+
({"target": 4, "features": [0]}, "target not in est.classes_, got 4"),
|
627 |
+
({"target": None, "features": [0]}, "target must be specified for multi-class"),
|
628 |
+
(
|
629 |
+
{"target": 1, "features": [4.5]},
|
630 |
+
"Each entry in features must be either an int,",
|
631 |
+
),
|
632 |
+
],
|
633 |
+
)
|
634 |
+
def test_plot_partial_dependence_multiclass_error(pyplot, params, err_msg):
|
635 |
+
iris = load_iris()
|
636 |
+
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
|
637 |
+
clf.fit(iris.data, iris.target)
|
638 |
+
|
639 |
+
with pytest.raises(ValueError, match=err_msg):
|
640 |
+
PartialDependenceDisplay.from_estimator(clf, iris.data, **params)
|
641 |
+
|
642 |
+
|
643 |
+
def test_plot_partial_dependence_does_not_override_ylabel(
|
644 |
+
pyplot, clf_diabetes, diabetes
|
645 |
+
):
|
646 |
+
# Non-regression test to be sure to not override the ylabel if it has been
|
647 |
+
# See https://github.com/scikit-learn/scikit-learn/issues/15772
|
648 |
+
_, axes = pyplot.subplots(1, 2)
|
649 |
+
axes[0].set_ylabel("Hello world")
|
650 |
+
PartialDependenceDisplay.from_estimator(
|
651 |
+
clf_diabetes, diabetes.data, [0, 1], ax=axes
|
652 |
+
)
|
653 |
+
|
654 |
+
assert axes[0].get_ylabel() == "Hello world"
|
655 |
+
assert axes[1].get_ylabel() == "Partial dependence"
|
656 |
+
|
657 |
+
|
658 |
+
@pytest.mark.parametrize(
|
659 |
+
"categorical_features, array_type",
|
660 |
+
[
|
661 |
+
(["col_A", "col_C"], "dataframe"),
|
662 |
+
([0, 2], "array"),
|
663 |
+
([True, False, True], "array"),
|
664 |
+
],
|
665 |
+
)
|
666 |
+
def test_plot_partial_dependence_with_categorical(
|
667 |
+
pyplot, categorical_features, array_type
|
668 |
+
):
|
669 |
+
X = [[1, 1, "A"], [2, 0, "C"], [3, 2, "B"]]
|
670 |
+
column_name = ["col_A", "col_B", "col_C"]
|
671 |
+
X = _convert_container(X, array_type, columns_name=column_name)
|
672 |
+
y = np.array([1.2, 0.5, 0.45]).T
|
673 |
+
|
674 |
+
preprocessor = make_column_transformer((OneHotEncoder(), categorical_features))
|
675 |
+
model = make_pipeline(preprocessor, LinearRegression())
|
676 |
+
model.fit(X, y)
|
677 |
+
|
678 |
+
# single feature
|
679 |
+
disp = PartialDependenceDisplay.from_estimator(
|
680 |
+
model,
|
681 |
+
X,
|
682 |
+
features=["col_C"],
|
683 |
+
feature_names=column_name,
|
684 |
+
categorical_features=categorical_features,
|
685 |
+
)
|
686 |
+
|
687 |
+
assert disp.figure_ is pyplot.gcf()
|
688 |
+
assert disp.bars_.shape == (1, 1)
|
689 |
+
assert disp.bars_[0][0] is not None
|
690 |
+
assert disp.lines_.shape == (1, 1)
|
691 |
+
assert disp.lines_[0][0] is None
|
692 |
+
assert disp.contours_.shape == (1, 1)
|
693 |
+
assert disp.contours_[0][0] is None
|
694 |
+
assert disp.deciles_vlines_.shape == (1, 1)
|
695 |
+
assert disp.deciles_vlines_[0][0] is None
|
696 |
+
assert disp.deciles_hlines_.shape == (1, 1)
|
697 |
+
assert disp.deciles_hlines_[0][0] is None
|
698 |
+
assert disp.axes_[0, 0].get_legend() is None
|
699 |
+
|
700 |
+
# interaction between two features
|
701 |
+
disp = PartialDependenceDisplay.from_estimator(
|
702 |
+
model,
|
703 |
+
X,
|
704 |
+
features=[("col_A", "col_C")],
|
705 |
+
feature_names=column_name,
|
706 |
+
categorical_features=categorical_features,
|
707 |
+
)
|
708 |
+
|
709 |
+
assert disp.figure_ is pyplot.gcf()
|
710 |
+
assert disp.bars_.shape == (1, 1)
|
711 |
+
assert disp.bars_[0][0] is None
|
712 |
+
assert disp.lines_.shape == (1, 1)
|
713 |
+
assert disp.lines_[0][0] is None
|
714 |
+
assert disp.contours_.shape == (1, 1)
|
715 |
+
assert disp.contours_[0][0] is None
|
716 |
+
assert disp.deciles_vlines_.shape == (1, 1)
|
717 |
+
assert disp.deciles_vlines_[0][0] is None
|
718 |
+
assert disp.deciles_hlines_.shape == (1, 1)
|
719 |
+
assert disp.deciles_hlines_[0][0] is None
|
720 |
+
assert disp.axes_[0, 0].get_legend() is None
|
721 |
+
|
722 |
+
|
723 |
+
def test_plot_partial_dependence_legend(pyplot):
|
724 |
+
pd = pytest.importorskip("pandas")
|
725 |
+
X = pd.DataFrame(
|
726 |
+
{
|
727 |
+
"col_A": ["A", "B", "C"],
|
728 |
+
"col_B": [1, 0, 2],
|
729 |
+
"col_C": ["C", "B", "A"],
|
730 |
+
}
|
731 |
+
)
|
732 |
+
y = np.array([1.2, 0.5, 0.45]).T
|
733 |
+
|
734 |
+
categorical_features = ["col_A", "col_C"]
|
735 |
+
preprocessor = make_column_transformer((OneHotEncoder(), categorical_features))
|
736 |
+
model = make_pipeline(preprocessor, LinearRegression())
|
737 |
+
model.fit(X, y)
|
738 |
+
|
739 |
+
disp = PartialDependenceDisplay.from_estimator(
|
740 |
+
model,
|
741 |
+
X,
|
742 |
+
features=["col_B", "col_C"],
|
743 |
+
categorical_features=categorical_features,
|
744 |
+
kind=["both", "average"],
|
745 |
+
)
|
746 |
+
|
747 |
+
legend_text = disp.axes_[0, 0].get_legend().get_texts()
|
748 |
+
assert len(legend_text) == 1
|
749 |
+
assert legend_text[0].get_text() == "average"
|
750 |
+
assert disp.axes_[0, 1].get_legend() is None
|
751 |
+
|
752 |
+
|
753 |
+
@pytest.mark.parametrize(
|
754 |
+
"kind, expected_shape",
|
755 |
+
[("average", (1, 2)), ("individual", (1, 2, 20)), ("both", (1, 2, 21))],
|
756 |
+
)
|
757 |
+
def test_plot_partial_dependence_subsampling(
|
758 |
+
pyplot, clf_diabetes, diabetes, kind, expected_shape
|
759 |
+
):
|
760 |
+
# check that the subsampling is properly working
|
761 |
+
# non-regression test for:
|
762 |
+
# https://github.com/scikit-learn/scikit-learn/pull/18359
|
763 |
+
matplotlib = pytest.importorskip("matplotlib")
|
764 |
+
grid_resolution = 25
|
765 |
+
feature_names = diabetes.feature_names
|
766 |
+
|
767 |
+
disp1 = PartialDependenceDisplay.from_estimator(
|
768 |
+
clf_diabetes,
|
769 |
+
diabetes.data,
|
770 |
+
["age", "bmi"],
|
771 |
+
kind=kind,
|
772 |
+
grid_resolution=grid_resolution,
|
773 |
+
feature_names=feature_names,
|
774 |
+
subsample=20,
|
775 |
+
random_state=0,
|
776 |
+
)
|
777 |
+
|
778 |
+
assert disp1.lines_.shape == expected_shape
|
779 |
+
assert all(
|
780 |
+
[isinstance(line, matplotlib.lines.Line2D) for line in disp1.lines_.ravel()]
|
781 |
+
)
|
782 |
+
|
783 |
+
|
784 |
+
@pytest.mark.parametrize(
|
785 |
+
"kind, line_kw, label",
|
786 |
+
[
|
787 |
+
("individual", {}, None),
|
788 |
+
("individual", {"label": "xxx"}, None),
|
789 |
+
("average", {}, None),
|
790 |
+
("average", {"label": "xxx"}, "xxx"),
|
791 |
+
("both", {}, "average"),
|
792 |
+
("both", {"label": "xxx"}, "xxx"),
|
793 |
+
],
|
794 |
+
)
|
795 |
+
def test_partial_dependence_overwrite_labels(
|
796 |
+
pyplot,
|
797 |
+
clf_diabetes,
|
798 |
+
diabetes,
|
799 |
+
kind,
|
800 |
+
line_kw,
|
801 |
+
label,
|
802 |
+
):
|
803 |
+
"""Test that make sure that we can overwrite the label of the PDP plot"""
|
804 |
+
disp = PartialDependenceDisplay.from_estimator(
|
805 |
+
clf_diabetes,
|
806 |
+
diabetes.data,
|
807 |
+
[0, 2],
|
808 |
+
grid_resolution=25,
|
809 |
+
feature_names=diabetes.feature_names,
|
810 |
+
kind=kind,
|
811 |
+
line_kw=line_kw,
|
812 |
+
)
|
813 |
+
|
814 |
+
for ax in disp.axes_.ravel():
|
815 |
+
if label is None:
|
816 |
+
assert ax.get_legend() is None
|
817 |
+
else:
|
818 |
+
legend_text = ax.get_legend().get_texts()
|
819 |
+
assert len(legend_text) == 1
|
820 |
+
assert legend_text[0].get_text() == label
|
821 |
+
|
822 |
+
|
823 |
+
@pytest.mark.parametrize(
|
824 |
+
"categorical_features, array_type",
|
825 |
+
[
|
826 |
+
(["col_A", "col_C"], "dataframe"),
|
827 |
+
([0, 2], "array"),
|
828 |
+
([True, False, True], "array"),
|
829 |
+
],
|
830 |
+
)
|
831 |
+
def test_grid_resolution_with_categorical(pyplot, categorical_features, array_type):
|
832 |
+
"""Check that we raise a ValueError when the grid_resolution is too small
|
833 |
+
respect to the number of categories in the categorical features targeted.
|
834 |
+
"""
|
835 |
+
X = [["A", 1, "A"], ["B", 0, "C"], ["C", 2, "B"]]
|
836 |
+
column_name = ["col_A", "col_B", "col_C"]
|
837 |
+
X = _convert_container(X, array_type, columns_name=column_name)
|
838 |
+
y = np.array([1.2, 0.5, 0.45]).T
|
839 |
+
|
840 |
+
preprocessor = make_column_transformer((OneHotEncoder(), categorical_features))
|
841 |
+
model = make_pipeline(preprocessor, LinearRegression())
|
842 |
+
model.fit(X, y)
|
843 |
+
|
844 |
+
err_msg = (
|
845 |
+
"resolution of the computed grid is less than the minimum number of categories"
|
846 |
+
)
|
847 |
+
with pytest.raises(ValueError, match=err_msg):
|
848 |
+
PartialDependenceDisplay.from_estimator(
|
849 |
+
model,
|
850 |
+
X,
|
851 |
+
features=["col_C"],
|
852 |
+
feature_names=column_name,
|
853 |
+
categorical_features=categorical_features,
|
854 |
+
grid_resolution=2,
|
855 |
+
)
|
856 |
+
|
857 |
+
|
858 |
+
@pytest.mark.parametrize("kind", ["individual", "average", "both"])
|
859 |
+
@pytest.mark.parametrize("centered", [True, False])
|
860 |
+
def test_partial_dependence_plot_limits_one_way(
|
861 |
+
pyplot, clf_diabetes, diabetes, kind, centered
|
862 |
+
):
|
863 |
+
"""Check that the PD limit on the plots are properly set on one-way plots."""
|
864 |
+
disp = PartialDependenceDisplay.from_estimator(
|
865 |
+
clf_diabetes,
|
866 |
+
diabetes.data,
|
867 |
+
features=(0, 1),
|
868 |
+
kind=kind,
|
869 |
+
grid_resolution=25,
|
870 |
+
feature_names=diabetes.feature_names,
|
871 |
+
)
|
872 |
+
|
873 |
+
range_pd = np.array([-1, 1], dtype=np.float64)
|
874 |
+
for pd in disp.pd_results:
|
875 |
+
if "average" in pd:
|
876 |
+
pd["average"][...] = range_pd[1]
|
877 |
+
pd["average"][0, 0] = range_pd[0]
|
878 |
+
if "individual" in pd:
|
879 |
+
pd["individual"][...] = range_pd[1]
|
880 |
+
pd["individual"][0, 0, 0] = range_pd[0]
|
881 |
+
|
882 |
+
disp.plot(centered=centered)
|
883 |
+
# check that we anchor to zero x-axis when centering
|
884 |
+
y_lim = range_pd - range_pd[0] if centered else range_pd
|
885 |
+
padding = 0.05 * (y_lim[1] - y_lim[0])
|
886 |
+
y_lim[0] -= padding
|
887 |
+
y_lim[1] += padding
|
888 |
+
for ax in disp.axes_.ravel():
|
889 |
+
assert_allclose(ax.get_ylim(), y_lim)
|
890 |
+
|
891 |
+
|
892 |
+
@pytest.mark.parametrize("centered", [True, False])
|
893 |
+
def test_partial_dependence_plot_limits_two_way(
|
894 |
+
pyplot, clf_diabetes, diabetes, centered
|
895 |
+
):
|
896 |
+
"""Check that the PD limit on the plots are properly set on two-way plots."""
|
897 |
+
disp = PartialDependenceDisplay.from_estimator(
|
898 |
+
clf_diabetes,
|
899 |
+
diabetes.data,
|
900 |
+
features=[(0, 1)],
|
901 |
+
kind="average",
|
902 |
+
grid_resolution=25,
|
903 |
+
feature_names=diabetes.feature_names,
|
904 |
+
)
|
905 |
+
|
906 |
+
range_pd = np.array([-1, 1], dtype=np.float64)
|
907 |
+
for pd in disp.pd_results:
|
908 |
+
pd["average"][...] = range_pd[1]
|
909 |
+
pd["average"][0, 0] = range_pd[0]
|
910 |
+
|
911 |
+
disp.plot(centered=centered)
|
912 |
+
contours = disp.contours_[0, 0]
|
913 |
+
levels = range_pd - range_pd[0] if centered else range_pd
|
914 |
+
|
915 |
+
padding = 0.05 * (levels[1] - levels[0])
|
916 |
+
levels[0] -= padding
|
917 |
+
levels[1] += padding
|
918 |
+
expect_levels = np.linspace(*levels, num=8)
|
919 |
+
assert_allclose(contours.levels, expect_levels)
|
920 |
+
|
921 |
+
|
922 |
+
def test_partial_dependence_kind_list(
|
923 |
+
pyplot,
|
924 |
+
clf_diabetes,
|
925 |
+
diabetes,
|
926 |
+
):
|
927 |
+
"""Check that we can provide a list of strings to kind parameter."""
|
928 |
+
matplotlib = pytest.importorskip("matplotlib")
|
929 |
+
|
930 |
+
disp = PartialDependenceDisplay.from_estimator(
|
931 |
+
clf_diabetes,
|
932 |
+
diabetes.data,
|
933 |
+
features=[0, 2, (1, 2)],
|
934 |
+
grid_resolution=20,
|
935 |
+
kind=["both", "both", "average"],
|
936 |
+
)
|
937 |
+
|
938 |
+
for idx in [0, 1]:
|
939 |
+
assert all(
|
940 |
+
[
|
941 |
+
isinstance(line, matplotlib.lines.Line2D)
|
942 |
+
for line in disp.lines_[0, idx].ravel()
|
943 |
+
]
|
944 |
+
)
|
945 |
+
assert disp.contours_[0, idx] is None
|
946 |
+
|
947 |
+
assert disp.contours_[0, 2] is not None
|
948 |
+
assert all([line is None for line in disp.lines_[0, 2].ravel()])
|
949 |
+
|
950 |
+
|
951 |
+
@pytest.mark.parametrize(
|
952 |
+
"features, kind",
|
953 |
+
[
|
954 |
+
([0, 2, (1, 2)], "individual"),
|
955 |
+
([0, 2, (1, 2)], "both"),
|
956 |
+
([(0, 1), (0, 2), (1, 2)], "individual"),
|
957 |
+
([(0, 1), (0, 2), (1, 2)], "both"),
|
958 |
+
([0, 2, (1, 2)], ["individual", "individual", "individual"]),
|
959 |
+
([0, 2, (1, 2)], ["both", "both", "both"]),
|
960 |
+
],
|
961 |
+
)
|
962 |
+
def test_partial_dependence_kind_error(
|
963 |
+
pyplot,
|
964 |
+
clf_diabetes,
|
965 |
+
diabetes,
|
966 |
+
features,
|
967 |
+
kind,
|
968 |
+
):
|
969 |
+
"""Check that we raise an informative error when 2-way PD is requested
|
970 |
+
together with 1-way PD/ICE"""
|
971 |
+
warn_msg = (
|
972 |
+
"ICE plot cannot be rendered for 2-way feature interactions. 2-way "
|
973 |
+
"feature interactions mandates PD plots using the 'average' kind"
|
974 |
+
)
|
975 |
+
with pytest.raises(ValueError, match=warn_msg):
|
976 |
+
PartialDependenceDisplay.from_estimator(
|
977 |
+
clf_diabetes,
|
978 |
+
diabetes.data,
|
979 |
+
features=features,
|
980 |
+
grid_resolution=20,
|
981 |
+
kind=kind,
|
982 |
+
)
|
983 |
+
|
984 |
+
|
985 |
+
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
|
986 |
+
@pytest.mark.parametrize(
|
987 |
+
"line_kw, pd_line_kw, ice_lines_kw, expected_colors",
|
988 |
+
[
|
989 |
+
({"color": "r"}, {"color": "g"}, {"color": "b"}, ("g", "b")),
|
990 |
+
(None, {"color": "g"}, {"color": "b"}, ("g", "b")),
|
991 |
+
({"color": "r"}, None, {"color": "b"}, ("r", "b")),
|
992 |
+
({"color": "r"}, {"color": "g"}, None, ("g", "r")),
|
993 |
+
({"color": "r"}, None, None, ("r", "r")),
|
994 |
+
({"color": "r"}, {"linestyle": "--"}, {"linestyle": "-."}, ("r", "r")),
|
995 |
+
],
|
996 |
+
)
|
997 |
+
def test_plot_partial_dependence_lines_kw(
|
998 |
+
pyplot,
|
999 |
+
clf_diabetes,
|
1000 |
+
diabetes,
|
1001 |
+
line_kw,
|
1002 |
+
pd_line_kw,
|
1003 |
+
ice_lines_kw,
|
1004 |
+
expected_colors,
|
1005 |
+
):
|
1006 |
+
"""Check that passing `pd_line_kw` and `ice_lines_kw` will act on the
|
1007 |
+
specific lines in the plot.
|
1008 |
+
"""
|
1009 |
+
|
1010 |
+
disp = PartialDependenceDisplay.from_estimator(
|
1011 |
+
clf_diabetes,
|
1012 |
+
diabetes.data,
|
1013 |
+
[0, 2],
|
1014 |
+
grid_resolution=20,
|
1015 |
+
feature_names=diabetes.feature_names,
|
1016 |
+
n_cols=2,
|
1017 |
+
kind="both",
|
1018 |
+
line_kw=line_kw,
|
1019 |
+
pd_line_kw=pd_line_kw,
|
1020 |
+
ice_lines_kw=ice_lines_kw,
|
1021 |
+
)
|
1022 |
+
|
1023 |
+
line = disp.lines_[0, 0, -1]
|
1024 |
+
assert line.get_color() == expected_colors[0]
|
1025 |
+
if pd_line_kw is not None and "linestyle" in pd_line_kw:
|
1026 |
+
assert line.get_linestyle() == pd_line_kw["linestyle"]
|
1027 |
+
else:
|
1028 |
+
assert line.get_linestyle() == "--"
|
1029 |
+
|
1030 |
+
line = disp.lines_[0, 0, 0]
|
1031 |
+
assert line.get_color() == expected_colors[1]
|
1032 |
+
if ice_lines_kw is not None and "linestyle" in ice_lines_kw:
|
1033 |
+
assert line.get_linestyle() == ice_lines_kw["linestyle"]
|
1034 |
+
else:
|
1035 |
+
assert line.get_linestyle() == "-"
|
1036 |
+
|
1037 |
+
|
1038 |
+
def test_partial_dependence_display_wrong_len_kind(
|
1039 |
+
pyplot,
|
1040 |
+
clf_diabetes,
|
1041 |
+
diabetes,
|
1042 |
+
):
|
1043 |
+
"""Check that we raise an error when `kind` is a list with a wrong length.
|
1044 |
+
|
1045 |
+
This case can only be triggered using the `PartialDependenceDisplay.from_estimator`
|
1046 |
+
method.
|
1047 |
+
"""
|
1048 |
+
disp = PartialDependenceDisplay.from_estimator(
|
1049 |
+
clf_diabetes,
|
1050 |
+
diabetes.data,
|
1051 |
+
features=[0, 2],
|
1052 |
+
grid_resolution=20,
|
1053 |
+
kind="average", # len(kind) != len(features)
|
1054 |
+
)
|
1055 |
+
|
1056 |
+
# alter `kind` to be a list with a length different from length of `features`
|
1057 |
+
disp.kind = ["average"]
|
1058 |
+
err_msg = (
|
1059 |
+
r"When `kind` is provided as a list of strings, it should contain as many"
|
1060 |
+
r" elements as `features`. `kind` contains 1 element\(s\) and `features`"
|
1061 |
+
r" contains 2 element\(s\)."
|
1062 |
+
)
|
1063 |
+
with pytest.raises(ValueError, match=err_msg):
|
1064 |
+
disp.plot()
|
1065 |
+
|
1066 |
+
|
1067 |
+
@pytest.mark.parametrize(
|
1068 |
+
"kind",
|
1069 |
+
["individual", "both", "average", ["average", "both"], ["individual", "both"]],
|
1070 |
+
)
|
1071 |
+
def test_partial_dependence_display_kind_centered_interaction(
|
1072 |
+
pyplot,
|
1073 |
+
kind,
|
1074 |
+
clf_diabetes,
|
1075 |
+
diabetes,
|
1076 |
+
):
|
1077 |
+
"""Check that we properly center ICE and PD when passing kind as a string and as a
|
1078 |
+
list."""
|
1079 |
+
disp = PartialDependenceDisplay.from_estimator(
|
1080 |
+
clf_diabetes,
|
1081 |
+
diabetes.data,
|
1082 |
+
[0, 1],
|
1083 |
+
kind=kind,
|
1084 |
+
centered=True,
|
1085 |
+
subsample=5,
|
1086 |
+
)
|
1087 |
+
|
1088 |
+
assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None])
|
1089 |
+
|
1090 |
+
|
1091 |
+
def test_partial_dependence_display_with_constant_sample_weight(
|
1092 |
+
pyplot,
|
1093 |
+
clf_diabetes,
|
1094 |
+
diabetes,
|
1095 |
+
):
|
1096 |
+
"""Check that the utilization of a constant sample weight maintains the
|
1097 |
+
standard behavior.
|
1098 |
+
"""
|
1099 |
+
disp = PartialDependenceDisplay.from_estimator(
|
1100 |
+
clf_diabetes,
|
1101 |
+
diabetes.data,
|
1102 |
+
[0, 1],
|
1103 |
+
kind="average",
|
1104 |
+
method="brute",
|
1105 |
+
)
|
1106 |
+
|
1107 |
+
sample_weight = np.ones_like(diabetes.target)
|
1108 |
+
disp_sw = PartialDependenceDisplay.from_estimator(
|
1109 |
+
clf_diabetes,
|
1110 |
+
diabetes.data,
|
1111 |
+
[0, 1],
|
1112 |
+
sample_weight=sample_weight,
|
1113 |
+
kind="average",
|
1114 |
+
method="brute",
|
1115 |
+
)
|
1116 |
+
|
1117 |
+
assert np.array_equal(
|
1118 |
+
disp.pd_results[0]["average"], disp_sw.pd_results[0]["average"]
|
1119 |
+
)
|
1120 |
+
|
1121 |
+
|
1122 |
+
def test_subclass_named_constructors_return_type_is_subclass(
|
1123 |
+
pyplot, diabetes, clf_diabetes
|
1124 |
+
):
|
1125 |
+
"""Check that named constructors return the correct type when subclassed.
|
1126 |
+
|
1127 |
+
Non-regression test for:
|
1128 |
+
https://github.com/scikit-learn/scikit-learn/pull/27675
|
1129 |
+
"""
|
1130 |
+
|
1131 |
+
class SubclassOfDisplay(PartialDependenceDisplay):
|
1132 |
+
pass
|
1133 |
+
|
1134 |
+
curve = SubclassOfDisplay.from_estimator(
|
1135 |
+
clf_diabetes,
|
1136 |
+
diabetes.data,
|
1137 |
+
[0, 2, (0, 2)],
|
1138 |
+
)
|
1139 |
+
|
1140 |
+
assert isinstance(curve, SubclassOfDisplay)
|
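
For reference, a minimal standalone sketch of the `PartialDependenceDisplay.from_estimator` call pattern these tests exercise. This is an editorial example, not part of the vendored test file; it assumes matplotlib is installed and a scikit-learn version that supports the `kind` list, `centered` and `subsample` parameters used above.

# Minimal sketch (editorial, not part of the vendored file): one-way PDP/ICE
# plus a two-way PDP, mirroring the parameters used throughout the tests above.
import matplotlib
matplotlib.use("Agg")  # render off-screen

from sklearn.datasets import load_diabetes
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay

diabetes = load_diabetes()
X, y = diabetes.data[:50], diabetes.target[:50]
clf = GradientBoostingRegressor(n_estimators=10, random_state=1).fit(X, y)

disp = PartialDependenceDisplay.from_estimator(
    clf,
    X,
    features=["age", "bmi", ("age", "bmi")],
    feature_names=diabetes.feature_names,
    kind=["both", "both", "average"],  # two-way plots only support "average"
    centered=True,
    subsample=20,
    random_state=0,
    grid_resolution=20,
)
# disp.axes_, disp.lines_ and disp.contours_ expose the created matplotlib
# artists, which is what the assertions in the tests above inspect.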
venv/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (192 Bytes)

venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc
ADDED
Binary file (22.4 kB)

venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc
ADDED
Binary file (1.94 kB)

venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc
ADDED
Binary file (11.3 kB)
venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py
ADDED
@@ -0,0 +1,958 @@
+"""
+Testing for the partial dependence module.
+"""
+import warnings
+
+import numpy as np
+import pytest
+
+import sklearn
+from sklearn.base import BaseEstimator, ClassifierMixin, clone, is_regressor
+from sklearn.cluster import KMeans
+from sklearn.compose import make_column_transformer
+from sklearn.datasets import load_iris, make_classification, make_regression
+from sklearn.dummy import DummyClassifier
+from sklearn.ensemble import (
+    GradientBoostingClassifier,
+    GradientBoostingRegressor,
+    HistGradientBoostingClassifier,
+    HistGradientBoostingRegressor,
+    RandomForestRegressor,
+)
+from sklearn.exceptions import NotFittedError
+from sklearn.inspection import partial_dependence
+from sklearn.inspection._partial_dependence import (
+    _grid_from_X,
+    _partial_dependence_brute,
+    _partial_dependence_recursion,
+)
+from sklearn.linear_model import LinearRegression, LogisticRegression, MultiTaskLasso
+from sklearn.metrics import r2_score
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import (
+    PolynomialFeatures,
+    RobustScaler,
+    StandardScaler,
+    scale,
+)
+from sklearn.tree import DecisionTreeRegressor
+from sklearn.tree.tests.test_tree import assert_is_subtree
+from sklearn.utils import _IS_32BIT
+from sklearn.utils._testing import assert_allclose, assert_array_equal
+from sklearn.utils.validation import check_random_state
+
+# toy sample
+X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
+y = [-1, -1, -1, 1, 1, 1]
+
+
+# (X, y), n_targets <-- as expected in the output of partial_dep()
+binary_classification_data = (make_classification(n_samples=50, random_state=0), 1)
+multiclass_classification_data = (
+    make_classification(
+        n_samples=50, n_classes=3, n_clusters_per_class=1, random_state=0
+    ),
+    3,
+)
+regression_data = (make_regression(n_samples=50, random_state=0), 1)
+multioutput_regression_data = (
+    make_regression(n_samples=50, n_targets=2, random_state=0),
+    2,
+)
+
+# iris
+iris = load_iris()
+
+
+@pytest.mark.parametrize(
+    "Estimator, method, data",
+    [
+        (GradientBoostingClassifier, "auto", binary_classification_data),
+        (GradientBoostingClassifier, "auto", multiclass_classification_data),
+        (GradientBoostingClassifier, "brute", binary_classification_data),
+        (GradientBoostingClassifier, "brute", multiclass_classification_data),
+        (GradientBoostingRegressor, "auto", regression_data),
+        (GradientBoostingRegressor, "brute", regression_data),
+        (DecisionTreeRegressor, "brute", regression_data),
+        (LinearRegression, "brute", regression_data),
+        (LinearRegression, "brute", multioutput_regression_data),
+        (LogisticRegression, "brute", binary_classification_data),
+        (LogisticRegression, "brute", multiclass_classification_data),
+        (MultiTaskLasso, "brute", multioutput_regression_data),
+    ],
+)
+@pytest.mark.parametrize("grid_resolution", (5, 10))
+@pytest.mark.parametrize("features", ([1], [1, 2]))
+@pytest.mark.parametrize("kind", ("average", "individual", "both"))
+def test_output_shape(Estimator, method, data, grid_resolution, features, kind):
+    # Check that partial_dependence has consistent output shape for different
+    # kinds of estimators:
+    # - classifiers with binary and multiclass settings
+    # - regressors
+    # - multi-task regressors
+
+    est = Estimator()
+    if hasattr(est, "n_estimators"):
+        est.set_params(n_estimators=2)  # speed-up computations
+
+    # n_target corresponds to the number of classes (1 for binary classif) or
+    # the number of tasks / outputs in multi task settings. It's equal to 1 for
+    # classical regression_data.
+    (X, y), n_targets = data
+    n_instances = X.shape[0]
+
+    est.fit(X, y)
+    result = partial_dependence(
+        est,
+        X=X,
+        features=features,
+        method=method,
+        kind=kind,
+        grid_resolution=grid_resolution,
+    )
+    pdp, axes = result, result["grid_values"]
+
+    expected_pdp_shape = (n_targets, *[grid_resolution for _ in range(len(features))])
+    expected_ice_shape = (
+        n_targets,
+        n_instances,
+        *[grid_resolution for _ in range(len(features))],
+    )
+    if kind == "average":
+        assert pdp.average.shape == expected_pdp_shape
+    elif kind == "individual":
+        assert pdp.individual.shape == expected_ice_shape
+    else:  # 'both'
+        assert pdp.average.shape == expected_pdp_shape
+        assert pdp.individual.shape == expected_ice_shape
+
+    expected_axes_shape = (len(features), grid_resolution)
+    assert axes is not None
+    assert np.asarray(axes).shape == expected_axes_shape
+
+
+def test_grid_from_X():
+    # tests for _grid_from_X: sanity check for output, and for shapes.
+
+    # Make sure that the grid is a cartesian product of the input (it will use
+    # the unique values instead of the percentiles)
+    percentiles = (0.05, 0.95)
+    grid_resolution = 100
+    is_categorical = [False, False]
+    X = np.asarray([[1, 2], [3, 4]])
+    grid, axes = _grid_from_X(X, percentiles, is_categorical, grid_resolution)
+    assert_array_equal(grid, [[1, 2], [1, 4], [3, 2], [3, 4]])
+    assert_array_equal(axes, X.T)
+
+    # test shapes of returned objects depending on the number of unique values
+    # for a feature.
+    rng = np.random.RandomState(0)
+    grid_resolution = 15
+
+    # n_unique_values > grid_resolution
+    X = rng.normal(size=(20, 2))
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    assert grid.shape == (grid_resolution * grid_resolution, X.shape[1])
+    assert np.asarray(axes).shape == (2, grid_resolution)
+
+    # n_unique_values < grid_resolution, will use actual values
+    n_unique_values = 12
+    X[n_unique_values - 1 :, 0] = 12345
+    rng.shuffle(X)  # just to make sure the order is irrelevant
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    assert grid.shape == (n_unique_values * grid_resolution, X.shape[1])
+    # axes is a list of arrays of different shapes
+    assert axes[0].shape == (n_unique_values,)
+    assert axes[1].shape == (grid_resolution,)
+
+
+@pytest.mark.parametrize(
+    "grid_resolution",
+    [
+        2,  # since n_categories > 2, we should not use quantiles resampling
+        100,
+    ],
+)
+def test_grid_from_X_with_categorical(grid_resolution):
+    """Check that `_grid_from_X` always samples from the categories and does
+    not depend on the percentiles.
+    """
+    pd = pytest.importorskip("pandas")
+    percentiles = (0.05, 0.95)
+    is_categorical = [True]
+    X = pd.DataFrame({"cat_feature": ["A", "B", "C", "A", "B", "D", "E"]})
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    assert grid.shape == (5, X.shape[1])
+    assert axes[0].shape == (5,)
+
+
+@pytest.mark.parametrize("grid_resolution", [3, 100])
+def test_grid_from_X_heterogeneous_type(grid_resolution):
+    """Check that `_grid_from_X` always samples from the categories and does
+    not depend on the percentiles.
+    """
+    pd = pytest.importorskip("pandas")
+    percentiles = (0.05, 0.95)
+    is_categorical = [True, False]
+    X = pd.DataFrame(
+        {
+            "cat": ["A", "B", "C", "A", "B", "D", "E", "A", "B", "D"],
+            "num": [1, 1, 1, 2, 5, 6, 6, 6, 6, 8],
+        }
+    )
+    nunique = X.nunique()
+
+    grid, axes = _grid_from_X(
+        X, percentiles, is_categorical, grid_resolution=grid_resolution
+    )
+    if grid_resolution == 3:
+        assert grid.shape == (15, 2)
+        assert axes[0].shape[0] == nunique["num"]
+        assert axes[1].shape[0] == grid_resolution
+    else:
+        assert grid.shape == (25, 2)
+        assert axes[0].shape[0] == nunique["cat"]
+        assert axes[1].shape[0] == nunique["cat"]
+
+
+@pytest.mark.parametrize(
+    "grid_resolution, percentiles, err_msg",
+    [
+        (2, (0, 0.0001), "percentiles are too close"),
+        (100, (1, 2, 3, 4), "'percentiles' must be a sequence of 2 elements"),
+        (100, 12345, "'percentiles' must be a sequence of 2 elements"),
+        (100, (-1, 0.95), r"'percentiles' values must be in \[0, 1\]"),
+        (100, (0.05, 2), r"'percentiles' values must be in \[0, 1\]"),
+        (100, (0.9, 0.1), r"percentiles\[0\] must be strictly less than"),
+        (1, (0.05, 0.95), "'grid_resolution' must be strictly greater than 1"),
+    ],
+)
+def test_grid_from_X_error(grid_resolution, percentiles, err_msg):
+    X = np.asarray([[1, 2], [3, 4]])
+    is_categorical = [False]
+    with pytest.raises(ValueError, match=err_msg):
+        _grid_from_X(X, percentiles, is_categorical, grid_resolution)
+
+
+@pytest.mark.parametrize("target_feature", range(5))
+@pytest.mark.parametrize(
+    "est, method",
+    [
+        (LinearRegression(), "brute"),
+        (GradientBoostingRegressor(random_state=0), "brute"),
+        (GradientBoostingRegressor(random_state=0), "recursion"),
+        (HistGradientBoostingRegressor(random_state=0), "brute"),
+        (HistGradientBoostingRegressor(random_state=0), "recursion"),
+    ],
+)
+def test_partial_dependence_helpers(est, method, target_feature):
+    # Check that what is returned by _partial_dependence_brute or
+    # _partial_dependence_recursion is equivalent to manually setting a target
+    # feature to a given value, and computing the average prediction over all
+    # samples.
+    # This also checks that the brute and recursion methods give the same
+    # output.
+    # Note that even on the trainset, the brute and the recursion methods
+    # aren't always strictly equivalent, in particular when the slow method
+    # generates unrealistic samples that have low mass in the joint
+    # distribution of the input features, and when some of the features are
+    # dependent. Hence the high tolerance on the checks.
+
+    X, y = make_regression(random_state=0, n_features=5, n_informative=5)
+    # The 'init' estimator for GBDT (here the average prediction) isn't taken
+    # into account with the recursion method, for technical reasons. We set
+    # the mean to 0 so that this 'bug' doesn't have any effect.
+    y = y - y.mean()
+    est.fit(X, y)
+
+    # target feature will be set to .5 and then to 123
+    features = np.array([target_feature], dtype=np.int32)
+    grid = np.array([[0.5], [123]])
+
+    if method == "brute":
+        pdp, predictions = _partial_dependence_brute(
+            est, grid, features, X, response_method="auto"
+        )
+    else:
+        pdp = _partial_dependence_recursion(est, grid, features)
+
+    mean_predictions = []
+    for val in (0.5, 123):
+        X_ = X.copy()
+        X_[:, target_feature] = val
+        mean_predictions.append(est.predict(X_).mean())
+
+    pdp = pdp[0]  # (shape is (1, 2) so make it (2,))
+
+    # allow for greater margin for error with recursion method
+    rtol = 1e-1 if method == "recursion" else 1e-3
+    assert np.allclose(pdp, mean_predictions, rtol=rtol)
+
+
+@pytest.mark.parametrize("seed", range(1))
+def test_recursion_decision_tree_vs_forest_and_gbdt(seed):
+    # Make sure that the recursion method gives the same results on a
+    # DecisionTreeRegressor and a GradientBoostingRegressor or a
+    # RandomForestRegressor with 1 tree and equivalent parameters.
+
+    rng = np.random.RandomState(seed)
+
+    # Purely random dataset to avoid correlated features
+    n_samples = 1000
+    n_features = 5
+    X = rng.randn(n_samples, n_features)
+    y = rng.randn(n_samples) * 10
+
+    # The 'init' estimator for GBDT (here the average prediction) isn't taken
+    # into account with the recursion method, for technical reasons. We set
+    # the mean to 0 so that this 'bug' doesn't have any effect.
+    y = y - y.mean()
+
+    # set max_depth not too high to avoid splits with same gain but different
+    # features
+    max_depth = 5
+
+    tree_seed = 0
+    forest = RandomForestRegressor(
+        n_estimators=1,
+        max_features=None,
+        bootstrap=False,
+        max_depth=max_depth,
+        random_state=tree_seed,
+    )
+    # The forest will use ensemble.base._set_random_states to set the
+    # random_state of the tree sub-estimator. We simulate this here to have
+    # equivalent estimators.
+    equiv_random_state = check_random_state(tree_seed).randint(np.iinfo(np.int32).max)
+    gbdt = GradientBoostingRegressor(
+        n_estimators=1,
+        learning_rate=1,
+        criterion="squared_error",
+        max_depth=max_depth,
+        random_state=equiv_random_state,
+    )
+    tree = DecisionTreeRegressor(max_depth=max_depth, random_state=equiv_random_state)
+
+    forest.fit(X, y)
+    gbdt.fit(X, y)
+    tree.fit(X, y)
+
+    # sanity check: if the trees aren't the same, the PD values won't be equal
+    try:
+        assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)
+        assert_is_subtree(tree.tree_, forest[0].tree_)
+    except AssertionError:
+        # For some reason the trees aren't exactly equal on 32bits, so the PDs
+        # cannot be equal either. See
+        # https://github.com/scikit-learn/scikit-learn/issues/8853
+        assert _IS_32BIT, "this should only fail on 32 bit platforms"
+        return
+
+    grid = rng.randn(50).reshape(-1, 1)
+    for f in range(n_features):
+        features = np.array([f], dtype=np.int32)
+
+        pdp_forest = _partial_dependence_recursion(forest, grid, features)
+        pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)
+        pdp_tree = _partial_dependence_recursion(tree, grid, features)
+
+        np.testing.assert_allclose(pdp_gbdt, pdp_tree)
+        np.testing.assert_allclose(pdp_forest, pdp_tree)
+
+
+@pytest.mark.parametrize(
+    "est",
+    (
+        GradientBoostingClassifier(random_state=0),
+        HistGradientBoostingClassifier(random_state=0),
+    ),
+)
+@pytest.mark.parametrize("target_feature", (0, 1, 2, 3, 4, 5))
+def test_recursion_decision_function(est, target_feature):
+    # Make sure the recursion method (implicitly uses decision_function) has
+    # the same result as using brute method with
+    # response_method=decision_function
+
+    X, y = make_classification(n_classes=2, n_clusters_per_class=1, random_state=1)
+    assert np.mean(y) == 0.5  # make sure the init estimator predicts 0 anyway
+
+    est.fit(X, y)
+
+    preds_1 = partial_dependence(
+        est,
+        X,
+        [target_feature],
+        response_method="decision_function",
+        method="recursion",
+        kind="average",
+    )
+    preds_2 = partial_dependence(
+        est,
+        X,
+        [target_feature],
+        response_method="decision_function",
+        method="brute",
+        kind="average",
+    )
+
+    assert_allclose(preds_1["average"], preds_2["average"], atol=1e-7)
+
+
+@pytest.mark.parametrize(
+    "est",
+    (
+        LinearRegression(),
+        GradientBoostingRegressor(random_state=0),
+        HistGradientBoostingRegressor(
+            random_state=0, min_samples_leaf=1, max_leaf_nodes=None, max_iter=1
+        ),
+        DecisionTreeRegressor(random_state=0),
+    ),
+)
+@pytest.mark.parametrize("power", (1, 2))
+def test_partial_dependence_easy_target(est, power):
+    # If the target y only depends on one feature in an obvious way (linear or
+    # quadratic) then the partial dependence for that feature should reflect
+    # it.
+    # We here fit a linear regression_data model (with polynomial features if
+    # needed) and compute r_squared to check that the partial dependence
+    # correctly reflects the target.
+
+    rng = np.random.RandomState(0)
+    n_samples = 200
+    target_variable = 2
+    X = rng.normal(size=(n_samples, 5))
+    y = X[:, target_variable] ** power
+
+    est.fit(X, y)
+
+    pdp = partial_dependence(
+        est, features=[target_variable], X=X, grid_resolution=1000, kind="average"
+    )
+
+    new_X = pdp["grid_values"][0].reshape(-1, 1)
+    new_y = pdp["average"][0]
+    # add polynomial features if needed
+    new_X = PolynomialFeatures(degree=power).fit_transform(new_X)
+
+    lr = LinearRegression().fit(new_X, new_y)
+    r2 = r2_score(new_y, lr.predict(new_X))
+
+    assert r2 > 0.99
+
+
+@pytest.mark.parametrize(
+    "Estimator",
+    (
+        sklearn.tree.DecisionTreeClassifier,
+        sklearn.tree.ExtraTreeClassifier,
+        sklearn.ensemble.ExtraTreesClassifier,
+        sklearn.neighbors.KNeighborsClassifier,
+        sklearn.neighbors.RadiusNeighborsClassifier,
+        sklearn.ensemble.RandomForestClassifier,
+    ),
+)
+def test_multiclass_multioutput(Estimator):
+    # Make sure error is raised for multiclass-multioutput classifiers
+
+    # make multiclass-multioutput dataset
+    X, y = make_classification(n_classes=3, n_clusters_per_class=1, random_state=0)
+    y = np.array([y, y]).T
+
+    est = Estimator()
+    est.fit(X, y)
+
+    with pytest.raises(
+        ValueError, match="Multiclass-multioutput estimators are not supported"
+    ):
+        partial_dependence(est, X, [0])
+
+
+class NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator):
+    def fit(self, X, y):
+        # simulate that we have some classes
+        self.classes_ = [0, 1]
+        return self
+
+
+@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
+@pytest.mark.parametrize(
+    "estimator, params, err_msg",
+    [
+        (
+            KMeans(random_state=0, n_init="auto"),
+            {"features": [0]},
+            "'estimator' must be a fitted regressor or classifier",
+        ),
+        (
+            LinearRegression(),
+            {"features": [0], "response_method": "predict_proba"},
+            "The response_method parameter is ignored for regressors",
+        ),
+        (
+            GradientBoostingClassifier(random_state=0),
+            {
+                "features": [0],
+                "response_method": "predict_proba",
+                "method": "recursion",
+            },
+            "'recursion' method, the response_method must be 'decision_function'",
+        ),
+        (
+            GradientBoostingClassifier(random_state=0),
+            {"features": [0], "response_method": "predict_proba", "method": "auto"},
+            "'recursion' method, the response_method must be 'decision_function'",
+        ),
+        (
+            LinearRegression(),
+            {"features": [0], "method": "recursion", "kind": "individual"},
+            "The 'recursion' method only applies when 'kind' is set to 'average'",
+        ),
+        (
+            LinearRegression(),
+            {"features": [0], "method": "recursion", "kind": "both"},
+            "The 'recursion' method only applies when 'kind' is set to 'average'",
+        ),
+        (
+            LinearRegression(),
+            {"features": [0], "method": "recursion"},
+            "Only the following estimators support the 'recursion' method:",
+        ),
+    ],
+)
+def test_partial_dependence_error(estimator, params, err_msg):
+    X, y = make_classification(random_state=0)
+    estimator.fit(X, y)
+
+    with pytest.raises(ValueError, match=err_msg):
+        partial_dependence(estimator, X, **params)
+
+
+@pytest.mark.parametrize(
+    "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)]
+)
+@pytest.mark.parametrize("features", [-1, 10000])
+def test_partial_dependence_unknown_feature_indices(estimator, features):
+    X, y = make_classification(random_state=0)
+    estimator.fit(X, y)
+
+    err_msg = "all features must be in"
+    with pytest.raises(ValueError, match=err_msg):
+        partial_dependence(estimator, X, [features])
+
+
+@pytest.mark.parametrize(
+    "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)]
+)
+def test_partial_dependence_unknown_feature_string(estimator):
+    pd = pytest.importorskip("pandas")
+    X, y = make_classification(random_state=0)
+    df = pd.DataFrame(X)
+    estimator.fit(df, y)
+
+    features = ["random"]
+    err_msg = "A given column is not a column of the dataframe"
+    with pytest.raises(ValueError, match=err_msg):
+        partial_dependence(estimator, df, features)
+
+
+@pytest.mark.parametrize(
+    "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)]
+)
+def test_partial_dependence_X_list(estimator):
+    # check that array-like objects are accepted
+    X, y = make_classification(random_state=0)
+    estimator.fit(X, y)
+    partial_dependence(estimator, list(X), [0], kind="average")
+
+
+def test_warning_recursion_non_constant_init():
+    # make sure that passing a non-constant init parameter to a GBDT and using
+    # the recursion method yields a warning.
+
+    gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0)
+    gbc.fit(X, y)
+
+    with pytest.warns(
+        UserWarning, match="Using recursion method with a non-constant init predictor"
+    ):
+        partial_dependence(gbc, X, [0], method="recursion", kind="average")
+
+    with pytest.warns(
+        UserWarning, match="Using recursion method with a non-constant init predictor"
+    ):
+        partial_dependence(gbc, X, [0], method="recursion", kind="average")
+
+
+def test_partial_dependence_sample_weight_of_fitted_estimator():
+    # Test near perfect correlation between partial dependence and diagonal
+    # when sample weights emphasize y = x predictions
+    # non-regression test for #13193
+    # TODO: extend to HistGradientBoosting once sample_weight is supported
+    N = 1000
+    rng = np.random.RandomState(123456)
+    mask = rng.randint(2, size=N, dtype=bool)
+
+    x = rng.rand(N)
+    # set y = x on mask and y = -x outside
+    y = x.copy()
+    y[~mask] = -y[~mask]
+    X = np.c_[mask, x]
+    # sample weights to emphasize data points where y = x
+    sample_weight = np.ones(N)
+    sample_weight[mask] = 1000.0
+
+    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
+    clf.fit(X, y, sample_weight=sample_weight)
+
+    pdp = partial_dependence(clf, X, features=[1], kind="average")
+
+    assert np.corrcoef(pdp["average"], pdp["grid_values"])[0, 1] > 0.99
+
+
+def test_hist_gbdt_sw_not_supported():
+    # TODO: remove/fix when PDP supports HGBT with sample weights
+    clf = HistGradientBoostingRegressor(random_state=1)
+    clf.fit(X, y, sample_weight=np.ones(len(X)))
+
+    with pytest.raises(
+        NotImplementedError, match="does not support partial dependence"
+    ):
+        partial_dependence(clf, X, features=[1])
+
+
+def test_partial_dependence_pipeline():
+    # check that partial dependence supports pipelines
+    iris = load_iris()
+
+    scaler = StandardScaler()
+    clf = DummyClassifier(random_state=42)
+    pipe = make_pipeline(scaler, clf)
+
+    clf.fit(scaler.fit_transform(iris.data), iris.target)
+    pipe.fit(iris.data, iris.target)
+
+    features = 0
+    pdp_pipe = partial_dependence(
+        pipe, iris.data, features=[features], grid_resolution=10, kind="average"
+    )
+    pdp_clf = partial_dependence(
+        clf,
+        scaler.transform(iris.data),
+        features=[features],
+        grid_resolution=10,
+        kind="average",
+    )
+    assert_allclose(pdp_pipe["average"], pdp_clf["average"])
+    assert_allclose(
+        pdp_pipe["grid_values"][0],
+        pdp_clf["grid_values"][0] * scaler.scale_[features] + scaler.mean_[features],
+    )
+
+
+@pytest.mark.parametrize(
+    "estimator",
+    [
+        LogisticRegression(max_iter=1000, random_state=0),
+        GradientBoostingClassifier(random_state=0, n_estimators=5),
+    ],
+    ids=["estimator-brute", "estimator-recursion"],
+)
+@pytest.mark.parametrize(
+    "preprocessor",
+    [
+        None,
+        make_column_transformer(
+            (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
+            (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]),
+        ),
+        make_column_transformer(
+            (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
+            remainder="passthrough",
+        ),
+    ],
+    ids=["None", "column-transformer", "column-transformer-passthrough"],
+)
+@pytest.mark.parametrize(
+    "features",
+    [[0, 2], [iris.feature_names[i] for i in (0, 2)]],
+    ids=["features-integer", "features-string"],
+)
+def test_partial_dependence_dataframe(estimator, preprocessor, features):
+    # check that partial dependence supports dataframes and pipelines
+    # including a column transformer
+    pd = pytest.importorskip("pandas")
+    df = pd.DataFrame(scale(iris.data), columns=iris.feature_names)
+
+    pipe = make_pipeline(preprocessor, estimator)
+    pipe.fit(df, iris.target)
+    pdp_pipe = partial_dependence(
+        pipe, df, features=features, grid_resolution=10, kind="average"
+    )
+
+    # the column transformer will reorder the columns when transforming;
+    # we mix the indices to be sure that we are computing the partial
+    # dependence of the right columns
+    if preprocessor is not None:
+        X_proc = clone(preprocessor).fit_transform(df)
+        features_clf = [0, 1]
+    else:
+        X_proc = df
+        features_clf = [0, 2]
+
+    clf = clone(estimator).fit(X_proc, iris.target)
+    pdp_clf = partial_dependence(
+        clf,
+        X_proc,
+        features=features_clf,
+        method="brute",
+        grid_resolution=10,
+        kind="average",
+    )
+
+    assert_allclose(pdp_pipe["average"], pdp_clf["average"])
+    if preprocessor is not None:
+        scaler = preprocessor.named_transformers_["standardscaler"]
+        assert_allclose(
+            pdp_pipe["grid_values"][1],
+            pdp_clf["grid_values"][1] * scaler.scale_[1] + scaler.mean_[1],
+        )
+    else:
+        assert_allclose(pdp_pipe["grid_values"][1], pdp_clf["grid_values"][1])
+
+
+@pytest.mark.parametrize(
+    "features, expected_pd_shape",
+    [
+        (0, (3, 10)),
+        (iris.feature_names[0], (3, 10)),
+        ([0, 2], (3, 10, 10)),
+        ([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)),
+        ([True, False, True, False], (3, 10, 10)),
+    ],
+    ids=["scalar-int", "scalar-str", "list-int", "list-str", "mask"],
+)
+def test_partial_dependence_feature_type(features, expected_pd_shape):
+    # check all possible feature types supported in PDP
+    pd = pytest.importorskip("pandas")
+    df = pd.DataFrame(iris.data, columns=iris.feature_names)
+
+    preprocessor = make_column_transformer(
+        (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
+        (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]),
+    )
+    pipe = make_pipeline(
+        preprocessor, LogisticRegression(max_iter=1000, random_state=0)
+    )
+    pipe.fit(df, iris.target)
+    pdp_pipe = partial_dependence(
+        pipe, df, features=features, grid_resolution=10, kind="average"
+    )
+    assert pdp_pipe["average"].shape == expected_pd_shape
+    assert len(pdp_pipe["grid_values"]) == len(pdp_pipe["average"].shape) - 1
+
+
+@pytest.mark.parametrize(
+    "estimator",
+    [
+        LinearRegression(),
+        LogisticRegression(),
+        GradientBoostingRegressor(),
+        GradientBoostingClassifier(),
+    ],
+)
+def test_partial_dependence_unfitted(estimator):
+    X = iris.data
+    preprocessor = make_column_transformer(
+        (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3])
+    )
+    pipe = make_pipeline(preprocessor, estimator)
+    with pytest.raises(NotFittedError, match="is not fitted yet"):
+        partial_dependence(pipe, X, features=[0, 2], grid_resolution=10)
+    with pytest.raises(NotFittedError, match="is not fitted yet"):
+        partial_dependence(estimator, X, features=[0, 2], grid_resolution=10)
+
+
+@pytest.mark.parametrize(
+    "Estimator, data",
+    [
+        (LinearRegression, multioutput_regression_data),
+        (LogisticRegression, binary_classification_data),
+    ],
+)
+def test_kind_average_and_average_of_individual(Estimator, data):
+    est = Estimator()
+    (X, y), n_targets = data
+    est.fit(X, y)
+
+    pdp_avg = partial_dependence(est, X=X, features=[1, 2], kind="average")
+    pdp_ind = partial_dependence(est, X=X, features=[1, 2], kind="individual")
+    avg_ind = np.mean(pdp_ind["individual"], axis=1)
+    assert_allclose(avg_ind, pdp_avg["average"])
+
+
+@pytest.mark.parametrize(
+    "Estimator, data",
+    [
+        (LinearRegression, multioutput_regression_data),
+        (LogisticRegression, binary_classification_data),
+    ],
+)
+def test_partial_dependence_kind_individual_ignores_sample_weight(Estimator, data):
+    """Check that `sample_weight` does not have any effect on reported ICE."""
+    est = Estimator()
+    (X, y), n_targets = data
+    sample_weight = np.arange(X.shape[0])
+    est.fit(X, y)
+
+    pdp_nsw = partial_dependence(est, X=X, features=[1, 2], kind="individual")
+    pdp_sw = partial_dependence(
+        est, X=X, features=[1, 2], kind="individual", sample_weight=sample_weight
+    )
+    assert_allclose(pdp_nsw["individual"], pdp_sw["individual"])
+    assert_allclose(pdp_nsw["grid_values"], pdp_sw["grid_values"])
+
+
+@pytest.mark.parametrize(
+    "estimator",
+    [
+        LinearRegression(),
+        LogisticRegression(),
+        RandomForestRegressor(),
+        GradientBoostingClassifier(),
+    ],
+)
+@pytest.mark.parametrize("non_null_weight_idx", [0, 1, -1])
+def test_partial_dependence_non_null_weight_idx(estimator, non_null_weight_idx):
+    """Check that if we pass a `sample_weight` of zeros with only one index with
+    sample weight equals one, then the average `partial_dependence` with this
+    `sample_weight` is equal to the individual `partial_dependence` of the
+    corresponding index.
+    """
+    X, y = iris.data, iris.target
+    preprocessor = make_column_transformer(
+        (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3])
+    )
+    pipe = make_pipeline(preprocessor, estimator).fit(X, y)
+
+    sample_weight = np.zeros_like(y)
+    sample_weight[non_null_weight_idx] = 1
+    pdp_sw = partial_dependence(
+        pipe,
+        X,
+        [2, 3],
+        kind="average",
+        sample_weight=sample_weight,
+        grid_resolution=10,
+    )
+    pdp_ind = partial_dependence(pipe, X, [2, 3], kind="individual", grid_resolution=10)
+    output_dim = 1 if is_regressor(pipe) else len(np.unique(y))
+    for i in range(output_dim):
+        assert_allclose(
+            pdp_ind["individual"][i][non_null_weight_idx],
+            pdp_sw["average"][i],
+        )
+
+
+@pytest.mark.parametrize(
+    "Estimator, data",
+    [
+        (LinearRegression, multioutput_regression_data),
+        (LogisticRegression, binary_classification_data),
+    ],
+)
+def test_partial_dependence_equivalence_equal_sample_weight(Estimator, data):
+    """Check that `sample_weight=None` is equivalent to having equal weights."""
+
+    est = Estimator()
+    (X, y), n_targets = data
+    est.fit(X, y)
+
+    sample_weight, params = None, {"X": X, "features": [1, 2], "kind": "average"}
+    pdp_sw_none = partial_dependence(est, **params, sample_weight=sample_weight)
+    sample_weight = np.ones(len(y))
+    pdp_sw_unit = partial_dependence(est, **params, sample_weight=sample_weight)
+    assert_allclose(pdp_sw_none["average"], pdp_sw_unit["average"])
+    sample_weight = 2 * np.ones(len(y))
+    pdp_sw_doubling = partial_dependence(est, **params, sample_weight=sample_weight)
+    assert_allclose(pdp_sw_none["average"], pdp_sw_doubling["average"])
+
+
+def test_partial_dependence_sample_weight_size_error():
+    """Check that we raise an error when the size of `sample_weight` is not
+    consistent with `X` and `y`.
+    """
+    est = LogisticRegression()
+    (X, y), n_targets = binary_classification_data
+    sample_weight = np.ones_like(y)
+    est.fit(X, y)
+
+    with pytest.raises(ValueError, match="sample_weight.shape =="):
+        partial_dependence(
+            est, X, features=[0], sample_weight=sample_weight[1:], grid_resolution=10
+        )
+
+
+def test_partial_dependence_sample_weight_with_recursion():
+    """Check that we raise an error when `sample_weight` is provided with
+    `"recursion"` method.
+    """
+    est = RandomForestRegressor()
+    (X, y), n_targets = regression_data
+    sample_weight = np.ones_like(y)
+    est.fit(X, y, sample_weight=sample_weight)
+
+    with pytest.raises(ValueError, match="'recursion' method can only be applied when"):
+        partial_dependence(
+            est, X, features=[0], method="recursion", sample_weight=sample_weight
+        )
+
+
+# TODO(1.5): Remove when bunch values is deprecated in 1.5
+def test_partial_dependence_bunch_values_deprecated():
+    """Test that deprecation warning is raised when values is accessed."""
+
+    est = LogisticRegression()
+    (X, y), _ = binary_classification_data
+    est.fit(X, y)
+
+    pdp_avg = partial_dependence(est, X=X, features=[1, 2], kind="average")
+
+    msg = (
+        "Key: 'values', is deprecated in 1.3 and will be "
+        "removed in 1.5. Please use 'grid_values' instead"
+    )
+
+    with warnings.catch_warnings():
+        # Does not raise warnings with "grid_values"
+        warnings.simplefilter("error", FutureWarning)
+        grid_values = pdp_avg["grid_values"]
+
+    with pytest.warns(FutureWarning, match=msg):
+        # Warns for "values"
+        values = pdp_avg["values"]
+
+    # "values" and "grid_values" are the same object
+    assert values is grid_values
+
+
+def test_mixed_type_categorical():
+    """Check that we raise a proper error when a column has mixed types and
+    the sorting of `np.unique` will fail."""
+    X = np.array(["A", "B", "C", np.nan], dtype=object).reshape(-1, 1)
+    y = np.array([0, 1, 0, 1])
+
+    from sklearn.preprocessing import OrdinalEncoder
+
+    clf = make_pipeline(
+        OrdinalEncoder(encoded_missing_value=-1),
+        LogisticRegression(),
+    ).fit(X, y)
+    with pytest.raises(ValueError, match="The column #0 contains mixed data types"):
+        partial_dependence(clf, X, features=[0])
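
As a reading aid for the tests above, the following is a minimal, hypothetical sketch of the `sklearn.inspection.partial_dependence` call pattern they exercise; the dataset and estimator choices here are illustrative assumptions, not taken from the diff. With `kind="average"`, the returned Bunch exposes an `average` array of shape `(n_outputs, grid_resolution, ...)` and a `grid_values` list with one grid axis per requested feature.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import partial_dependence

# Illustrative data and estimator (assumptions for this sketch only).
X, y = make_regression(n_samples=200, n_features=5, random_state=0)
est = GradientBoostingRegressor(random_state=0).fit(X, y)

# One feature, averaged partial dependence on a 10-point grid.
pdp = partial_dependence(est, X, features=[0], kind="average", grid_resolution=10)
print(pdp["average"].shape)     # (1, 10): one output, 10 grid points
print(len(pdp["grid_values"]))  # 1: one grid axis for the single requested feature
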
venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py ADDED
@@ -0,0 +1,47 @@
+import numpy as np
+import pytest
+
+from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index
+from sklearn.utils._testing import _convert_container
+
+
+@pytest.mark.parametrize(
+    "feature_names, array_type, expected_feature_names",
+    [
+        (None, "array", ["x0", "x1", "x2"]),
+        (None, "dataframe", ["a", "b", "c"]),
+        (np.array(["a", "b", "c"]), "array", ["a", "b", "c"]),
+    ],
+)
+def test_check_feature_names(feature_names, array_type, expected_feature_names):
+    X = np.random.randn(10, 3)
+    column_names = ["a", "b", "c"]
+    X = _convert_container(X, constructor_name=array_type, columns_name=column_names)
+    feature_names_validated = _check_feature_names(X, feature_names)
+    assert feature_names_validated == expected_feature_names
+
+
+def test_check_feature_names_error():
+    X = np.random.randn(10, 3)
+    feature_names = ["a", "b", "c", "a"]
+    msg = "feature_names should not contain duplicates."
+    with pytest.raises(ValueError, match=msg):
+        _check_feature_names(X, feature_names)
+
+
+@pytest.mark.parametrize("fx, idx", [(0, 0), (1, 1), ("a", 0), ("b", 1), ("c", 2)])
+def test_get_feature_index(fx, idx):
+    feature_names = ["a", "b", "c"]
+    assert _get_feature_index(fx, feature_names) == idx
+
+
+@pytest.mark.parametrize(
+    "fx, feature_names, err_msg",
+    [
+        ("a", None, "Cannot plot partial dependence for feature 'a'"),
+        ("d", ["a", "b", "c"], "Feature 'd' not in feature_names"),
+    ],
+)
+def test_get_feature_names_error(fx, feature_names, err_msg):
+    with pytest.raises(ValueError, match=err_msg):
+        _get_feature_index(fx, feature_names)
venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py ADDED
@@ -0,0 +1,542 @@
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from sklearn.compose import ColumnTransformer
+from sklearn.datasets import (
+    load_diabetes,
+    load_iris,
+    make_classification,
+    make_regression,
+)
+from sklearn.dummy import DummyClassifier
+from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
+from sklearn.impute import SimpleImputer
+from sklearn.inspection import permutation_importance
+from sklearn.linear_model import LinearRegression, LogisticRegression
+from sklearn.metrics import (
+    get_scorer,
+    mean_squared_error,
+    r2_score,
+)
+from sklearn.model_selection import train_test_split
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, StandardScaler, scale
+from sklearn.utils import parallel_backend
+from sklearn.utils._testing import _convert_container
+
+
+@pytest.mark.parametrize("n_jobs", [1, 2])
+@pytest.mark.parametrize("max_samples", [0.5, 1.0])
+@pytest.mark.parametrize("sample_weight", [None, "ones"])
+def test_permutation_importance_correlated_feature_regression(
+    n_jobs, max_samples, sample_weight
+):
+    # Make sure that features highly correlated with the target have a higher
+    # importance
+    rng = np.random.RandomState(42)
+    n_repeats = 5
+
+    X, y = load_diabetes(return_X_y=True)
+    y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
+
+    X = np.hstack([X, y_with_little_noise])
+
+    weights = np.ones_like(y) if sample_weight == "ones" else sample_weight
+    clf = RandomForestRegressor(n_estimators=10, random_state=42)
+    clf.fit(X, y)
+
+    result = permutation_importance(
+        clf,
+        X,
+        y,
+        sample_weight=weights,
+        n_repeats=n_repeats,
+        random_state=rng,
+        n_jobs=n_jobs,
+        max_samples=max_samples,
+    )
+
+    assert result.importances.shape == (X.shape[1], n_repeats)
+
+    # the feature correlated with y was added as the last column and should
+    # have the highest importance
+    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
+
+
+@pytest.mark.parametrize("n_jobs", [1, 2])
+@pytest.mark.parametrize("max_samples", [0.5, 1.0])
+def test_permutation_importance_correlated_feature_regression_pandas(
+    n_jobs, max_samples
+):
+    pd = pytest.importorskip("pandas")
+
+    # Make sure that features highly correlated with the target have a higher
+    # importance
+    rng = np.random.RandomState(42)
+    n_repeats = 5
+
+    dataset = load_iris()
+    X, y = dataset.data, dataset.target
+    y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
+
+    # Adds feature correlated with y as the last column
+    X = pd.DataFrame(X, columns=dataset.feature_names)
+    X["correlated_feature"] = y_with_little_noise
+
+    clf = RandomForestClassifier(n_estimators=10, random_state=42)
+    clf.fit(X, y)
+
+    result = permutation_importance(
+        clf,
+        X,
+        y,
+        n_repeats=n_repeats,
+        random_state=rng,
+        n_jobs=n_jobs,
+        max_samples=max_samples,
+    )
+
+    assert result.importances.shape == (X.shape[1], n_repeats)
+
+    # the feature correlated with y was added as the last column and should
+    # have the highest importance
+    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
+
+
+@pytest.mark.parametrize("n_jobs", [1, 2])
+@pytest.mark.parametrize("max_samples", [0.5, 1.0])
+def test_robustness_to_high_cardinality_noisy_feature(n_jobs, max_samples, seed=42):
+    # Permutation variable importance should not be affected by the high
+    # cardinality bias of traditional feature importances, especially when
+    # computed on a held-out test set:
+    rng = np.random.RandomState(seed)
+    n_repeats = 5
+    n_samples = 1000
+    n_classes = 5
+    n_informative_features = 2
+    n_noise_features = 1
+    n_features = n_informative_features + n_noise_features
+
+    # Generate a multiclass classification dataset and a set of informative
+    # binary features that can be used to predict some classes of y exactly
+    # while leaving some classes unexplained to make the problem harder.
+    classes = np.arange(n_classes)
+    y = rng.choice(classes, size=n_samples)
+    X = np.hstack([(y == c).reshape(-1, 1) for c in classes[:n_informative_features]])
+    X = X.astype(np.float32)
+
+    # Not all target classes are explained by the binary class indicator
+    # features:
+    assert n_informative_features < n_classes
+
+    # Add 10 other noisy features with high cardinality (numerical) values
+    # that can be used to overfit the training data.
+    X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1)
+    assert X.shape == (n_samples, n_features)
+
+    # Split the dataset to be able to evaluate on a held-out test set. The
+    # Test size should be large enough for importance measurements to be
+    # stable:
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.5, random_state=rng
+    )
+    clf = RandomForestClassifier(n_estimators=5, random_state=rng)
+    clf.fit(X_train, y_train)
+
+    # Variable importances computed by impurity decrease on the tree node
+    # splits often use the noisy features in splits. This can give the misleading
+    # impression that high cardinality noisy variables are the most important:
+    tree_importances = clf.feature_importances_
+    informative_tree_importances = tree_importances[:n_informative_features]
+    noisy_tree_importances = tree_importances[n_informative_features:]
+    assert informative_tree_importances.max() < noisy_tree_importances.min()
+
+    # Let's check that permutation-based feature importances do not have this
+    # problem.
+    r = permutation_importance(
+        clf,
+        X_test,
+        y_test,
+        n_repeats=n_repeats,
+        random_state=rng,
+        n_jobs=n_jobs,
+        max_samples=max_samples,
+    )
+
+    assert r.importances.shape == (X.shape[1], n_repeats)
+
+    # Split the importances between informative and noisy features
+    informative_importances = r.importances_mean[:n_informative_features]
+    noisy_importances = r.importances_mean[n_informative_features:]
+
+    # Because we do not have a binary variable explaining each target class,
+    # the RF model will have to use the random variable to make some
+    # (overfitting) splits (as max_depth is not set). Therefore the noisy
+    # variables will be non-zero but with small values oscillating around
+    # zero:
+    assert max(np.abs(noisy_importances)) > 1e-7
+    assert noisy_importances.max() < 0.05
+
+    # The binary features correlated with y should have a higher importance
+    # than the high cardinality noisy features.
+    # The maximum test accuracy is 2 / 5 == 0.4, each informative feature
+    # contributing approximately a bit more than 0.2 of accuracy.
+    assert informative_importances.min() > 0.15
+
+
+def test_permutation_importance_mixed_types():
+    rng = np.random.RandomState(42)
+    n_repeats = 4
+
+    # Last column is correlated with y
+    X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
+    y = np.array([0, 1, 0, 1])
+
+    clf = make_pipeline(SimpleImputer(), LogisticRegression(solver="lbfgs"))
+    clf.fit(X, y)
+    result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng)
+
+    assert result.importances.shape == (X.shape[1], n_repeats)
+
+    # the feature correlated with y is the last column and should
+    # have the highest importance
+    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
+
+    # use another random state
+    rng = np.random.RandomState(0)
+    result2 = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng)
+    assert result2.importances.shape == (X.shape[1], n_repeats)
+
+    assert not np.allclose(result.importances, result2.importances)
+
+    # the feature correlated with y is the last column and should
+    # have the highest importance
+    assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
+
+
+def test_permutation_importance_mixed_types_pandas():
+    pd = pytest.importorskip("pandas")
+    rng = np.random.RandomState(42)
+    n_repeats = 5
+
+    # Last column is correlated with y
+    X = pd.DataFrame({"col1": [1.0, 2.0, 3.0, np.nan], "col2": ["a", "b", "a", "b"]})
+    y = np.array([0, 1, 0, 1])
+
+    num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
+    preprocess = ColumnTransformer(
+        [("num", num_preprocess, ["col1"]), ("cat", OneHotEncoder(), ["col2"])]
+    )
+    clf = make_pipeline(preprocess, LogisticRegression(solver="lbfgs"))
+    clf.fit(X, y)
+
+    result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng)
+
+    assert result.importances.shape == (X.shape[1], n_repeats)
+    # the feature correlated with y is the last column and should
+    # have the highest importance
+    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
+
+
+def test_permutation_importance_linear_regresssion():
+    X, y = make_regression(n_samples=500, n_features=10, random_state=0)
+
+    X = scale(X)
+    y = scale(y)
+
+    lr = LinearRegression().fit(X, y)
+
+    # this relationship can be computed in closed form
+    expected_importances = 2 * lr.coef_**2
+    results = permutation_importance(
+        lr, X, y, n_repeats=50, scoring="neg_mean_squared_error"
+    )
+    assert_allclose(
+        expected_importances, results.importances_mean, rtol=1e-1, atol=1e-6
+    )
+
+
+@pytest.mark.parametrize("max_samples", [500, 1.0])
+def test_permutation_importance_equivalence_sequential_parallel(max_samples):
+    # regression test to make sure that sequential and parallel calls will
+    # output the same results.
+    # Also tests that max_samples equal to number of samples is equivalent to 1.0
+    X, y = make_regression(n_samples=500, n_features=10, random_state=0)
+    lr = LinearRegression().fit(X, y)
+
+    importance_sequential = permutation_importance(
+        lr, X, y, n_repeats=5, random_state=0, n_jobs=1, max_samples=max_samples
+    )
+
+    # First check that the problem is structured enough and that the model is
+    # complex enough to not yield trivial, constant importances:
+    imp_min = importance_sequential["importances"].min()
+    imp_max = importance_sequential["importances"].max()
+    assert imp_max - imp_min > 0.3
+
+    # Then actually check that parallelism does not impact the results,
+    # either with shared memory (threading) or with isolated memory
+    # via process-based parallelism using the default backend
+    # ('loky' or 'multiprocessing') depending on the joblib version:
+
+    # process-based parallelism (by default):
+    importance_processes = permutation_importance(
+        lr, X, y, n_repeats=5, random_state=0, n_jobs=2
+    )
+    assert_allclose(
+        importance_processes["importances"], importance_sequential["importances"]
+    )
+
+    # thread-based parallelism:
+    with parallel_backend("threading"):
+        importance_threading = permutation_importance(
+            lr, X, y, n_repeats=5, random_state=0, n_jobs=2
+        )
+    assert_allclose(
+        importance_threading["importances"], importance_sequential["importances"]
+    )
+
+
+@pytest.mark.parametrize("n_jobs", [None, 1, 2])
+@pytest.mark.parametrize("max_samples", [0.5, 1.0])
+def test_permutation_importance_equivalence_array_dataframe(n_jobs, max_samples):
+    # This test checks that the column shuffling logic has the same behavior
+    # for both a dataframe and a simple numpy array.
+    pd = pytest.importorskip("pandas")
+
+    # regression test to make sure that sequential and parallel calls will
+    # output the same results.
+    X, y = make_regression(n_samples=100, n_features=5, random_state=0)
+    X_df = pd.DataFrame(X)
+
+    # Add a categorical feature that is statistically linked to y:
+    binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
+    cat_column = binner.fit_transform(y.reshape(-1, 1))
+
+    # Concatenate the extra column to the numpy array: integers will be
+    # cast to float values
+    X = np.hstack([X, cat_column])
+    assert X.dtype.kind == "f"
+
+    # Insert extra column as a non-numpy-native dtype (while keeping backward
+    # compat for old pandas versions):
+    if hasattr(pd, "Categorical"):
+        cat_column = pd.Categorical(cat_column.ravel())
+    else:
+        cat_column = cat_column.ravel()
+    new_col_idx = len(X_df.columns)
+    X_df[new_col_idx] = cat_column
+    assert X_df[new_col_idx].dtype == cat_column.dtype
+
+    # Stitch an arbitrary index to the dataframe:
+    X_df.index = np.arange(len(X_df)).astype(str)
+
+    rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
+    rf.fit(X, y)
+
+    n_repeats = 3
+    importance_array = permutation_importance(
+        rf,
+        X,
+        y,
+        n_repeats=n_repeats,
+        random_state=0,
+        n_jobs=n_jobs,
+        max_samples=max_samples,
+    )
+
+    # First check that the problem is structured enough and that the model is
+    # complex enough to not yield trivial, constant importances:
+    imp_min = importance_array["importances"].min()
+    imp_max = importance_array["importances"].max()
+    assert imp_max - imp_min > 0.3
+
+    # Now check that importances computed on the dataframe match the values
+    # of those computed on the array with the same data.
+    importance_dataframe = permutation_importance(
+        rf,
+        X_df,
+        y,
+        n_repeats=n_repeats,
+        random_state=0,
+        n_jobs=n_jobs,
+        max_samples=max_samples,
+    )
+    assert_allclose(
+        importance_array["importances"], importance_dataframe["importances"]
+    )
+
+
+@pytest.mark.parametrize("input_type", ["array", "dataframe"])
+def test_permutation_importance_large_memmaped_data(input_type):
+    # Smoke, non-regression test for:
+    # https://github.com/scikit-learn/scikit-learn/issues/15810
+    n_samples, n_features = int(5e4), 4
+    X, y = make_classification(
+        n_samples=n_samples, n_features=n_features, random_state=0
+    )
+    assert X.nbytes > 1e6  # trigger joblib memmaping
+
+    X = _convert_container(X, input_type)
+    clf = DummyClassifier(strategy="prior").fit(X, y)
+
+    # Actual smoke test: should not raise any error:
+    n_repeats = 5
+    r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
+
+    # Auxiliary check: DummyClassifier is feature independent:
+    # permuting features should not change the predictions
+    expected_importances = np.zeros((n_features, n_repeats))
+    assert_allclose(expected_importances, r.importances)
+
+
+def test_permutation_importance_sample_weight():
+    # Creating data with 2 features and 1000 samples, where the target
+    # variable is a linear combination of the two features, such that
+    # in half of the samples the impact of feature 1 is twice the impact of
+    # feature 2, and vice versa on the other half of the samples.
+    rng = np.random.RandomState(1)
+    n_samples = 1000
+    n_features = 2
+    n_half_samples = n_samples // 2
+    x = rng.normal(0.0, 0.001, (n_samples, n_features))
+    y = np.zeros(n_samples)
+    y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
+    y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
+
+    # Fitting linear regression with perfect prediction
+    lr = LinearRegression(fit_intercept=False)
+    lr.fit(x, y)
+
+    # When all samples are weighted with the same weights, the ratio of
+    # the two features' importances should equal 1 in expectation (when using
+    # mean absolute error as the loss function).
+    pi = permutation_importance(
+        lr, x, y, random_state=1, scoring="neg_mean_absolute_error", n_repeats=200
+    )
+    x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
+    assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01)
+
+    # When passing a vector of ones as the sample_weight, results should be
+    # the same as in the case that sample_weight=None.
+    w = np.ones(n_samples)
+    pi = permutation_importance(
+        lr,
+        x,
+        y,
+        random_state=1,
+        scoring="neg_mean_absolute_error",
+        n_repeats=200,
+        sample_weight=w,
+    )
+    x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
+    assert x1_x2_imp_ratio_w_ones == pytest.approx(x1_x2_imp_ratio_w_none, 0.01)
+
+    # When the ratio between the weights of the first half of the samples and
+    # the second half of the samples approaches infinity, the ratio of
+    # the two features' importances should equal 2 in expectation (when using
+    # mean absolute error as the loss function).
+    w = np.hstack(
+        [np.repeat(10.0**10, n_half_samples), np.repeat(1.0, n_half_samples)]
+    )
+    lr.fit(x, y, w)
+    pi = permutation_importance(
+        lr,
+        x,
+        y,
+        random_state=1,
+        scoring="neg_mean_absolute_error",
+        n_repeats=200,
+        sample_weight=w,
+    )
+    x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
+    assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01)
+
+
+def test_permutation_importance_no_weights_scoring_function():
+    # Creating a scorer function that does not take sample_weight
+    def my_scorer(estimator, X, y):
+        return 1
+
+    # Creating some data and estimator for the permutation test
+    x = np.array([[1, 2], [3, 4]])
+    y = np.array([1, 2])
+    w = np.array([1, 1])
+    lr = LinearRegression()
+    lr.fit(x, y)
+
+    # test that permutation_importance does not return error when
+    # sample_weight is None
+    try:
+        permutation_importance(lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1)
+    except TypeError:
+        pytest.fail(
+            "permutation_test raised an error when using a scorer "
+            "function that does not accept sample_weight even though "
+            "sample_weight was None"
+        )
+
+
# test that permutation_importance raise exception when sample_weight is
|
481 |
+
# not None
|
482 |
+
with pytest.raises(TypeError):
|
483 |
+
permutation_importance(
|
484 |
+
lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1, sample_weight=w
|
485 |
+
)
|
486 |
+
|
487 |
+
|
488 |
+
@pytest.mark.parametrize(
|
489 |
+
"list_single_scorer, multi_scorer",
|
490 |
+
[
|
491 |
+
(["r2", "neg_mean_squared_error"], ["r2", "neg_mean_squared_error"]),
|
492 |
+
(
|
493 |
+
["r2", "neg_mean_squared_error"],
|
494 |
+
{
|
495 |
+
"r2": get_scorer("r2"),
|
496 |
+
"neg_mean_squared_error": get_scorer("neg_mean_squared_error"),
|
497 |
+
},
|
498 |
+
),
|
499 |
+
(
|
500 |
+
["r2", "neg_mean_squared_error"],
|
501 |
+
lambda estimator, X, y: {
|
502 |
+
"r2": r2_score(y, estimator.predict(X)),
|
503 |
+
"neg_mean_squared_error": -mean_squared_error(y, estimator.predict(X)),
|
504 |
+
},
|
505 |
+
),
|
506 |
+
],
|
507 |
+
)
|
508 |
+
def test_permutation_importance_multi_metric(list_single_scorer, multi_scorer):
|
509 |
+
# Test permutation importance when scoring contains multiple scorers
|
510 |
+
|
511 |
+
# Creating some data and estimator for the permutation test
|
512 |
+
x, y = make_regression(n_samples=500, n_features=10, random_state=0)
|
513 |
+
lr = LinearRegression().fit(x, y)
|
514 |
+
|
515 |
+
multi_importance = permutation_importance(
|
516 |
+
lr, x, y, random_state=1, scoring=multi_scorer, n_repeats=2
|
517 |
+
)
|
518 |
+
assert set(multi_importance.keys()) == set(list_single_scorer)
|
519 |
+
|
520 |
+
for scorer in list_single_scorer:
|
521 |
+
multi_result = multi_importance[scorer]
|
522 |
+
single_result = permutation_importance(
|
523 |
+
lr, x, y, random_state=1, scoring=scorer, n_repeats=2
|
524 |
+
)
|
525 |
+
|
526 |
+
assert_allclose(multi_result.importances, single_result.importances)
|
527 |
+
|
528 |
+
|
529 |
+
def test_permutation_importance_max_samples_error():
|
530 |
+
"""Check that a proper error message is raised when `max_samples` is not
|
531 |
+
set to a valid input value.
|
532 |
+
"""
|
533 |
+
X = np.array([(1.0, 2.0, 3.0, 4.0)]).T
|
534 |
+
y = np.array([0, 1, 0, 1])
|
535 |
+
|
536 |
+
clf = LogisticRegression()
|
537 |
+
clf.fit(X, y)
|
538 |
+
|
539 |
+
err_msg = r"max_samples must be <= n_samples"
|
540 |
+
|
541 |
+
with pytest.raises(ValueError, match=err_msg):
|
542 |
+
permutation_importance(clf, X, y, max_samples=5)
|
venv/lib/python3.10/site-packages/sklearn/metrics/__init__.py
ADDED
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
The :mod:`sklearn.metrics` module includes score functions, performance metrics
|
3 |
+
and pairwise metrics and distance computations.
|
4 |
+
"""
|
5 |
+
|
6 |
+
|
7 |
+
from . import cluster
|
8 |
+
from ._classification import (
|
9 |
+
accuracy_score,
|
10 |
+
balanced_accuracy_score,
|
11 |
+
brier_score_loss,
|
12 |
+
class_likelihood_ratios,
|
13 |
+
classification_report,
|
14 |
+
cohen_kappa_score,
|
15 |
+
confusion_matrix,
|
16 |
+
f1_score,
|
17 |
+
fbeta_score,
|
18 |
+
hamming_loss,
|
19 |
+
hinge_loss,
|
20 |
+
jaccard_score,
|
21 |
+
log_loss,
|
22 |
+
matthews_corrcoef,
|
23 |
+
multilabel_confusion_matrix,
|
24 |
+
precision_recall_fscore_support,
|
25 |
+
precision_score,
|
26 |
+
recall_score,
|
27 |
+
zero_one_loss,
|
28 |
+
)
|
29 |
+
from ._dist_metrics import DistanceMetric
|
30 |
+
from ._plot.confusion_matrix import ConfusionMatrixDisplay
|
31 |
+
from ._plot.det_curve import DetCurveDisplay
|
32 |
+
from ._plot.precision_recall_curve import PrecisionRecallDisplay
|
33 |
+
from ._plot.regression import PredictionErrorDisplay
|
34 |
+
from ._plot.roc_curve import RocCurveDisplay
|
35 |
+
from ._ranking import (
|
36 |
+
auc,
|
37 |
+
average_precision_score,
|
38 |
+
coverage_error,
|
39 |
+
dcg_score,
|
40 |
+
det_curve,
|
41 |
+
label_ranking_average_precision_score,
|
42 |
+
label_ranking_loss,
|
43 |
+
ndcg_score,
|
44 |
+
precision_recall_curve,
|
45 |
+
roc_auc_score,
|
46 |
+
roc_curve,
|
47 |
+
top_k_accuracy_score,
|
48 |
+
)
|
49 |
+
from ._regression import (
|
50 |
+
d2_absolute_error_score,
|
51 |
+
d2_pinball_score,
|
52 |
+
d2_tweedie_score,
|
53 |
+
explained_variance_score,
|
54 |
+
max_error,
|
55 |
+
mean_absolute_error,
|
56 |
+
mean_absolute_percentage_error,
|
57 |
+
mean_gamma_deviance,
|
58 |
+
mean_pinball_loss,
|
59 |
+
mean_poisson_deviance,
|
60 |
+
mean_squared_error,
|
61 |
+
mean_squared_log_error,
|
62 |
+
mean_tweedie_deviance,
|
63 |
+
median_absolute_error,
|
64 |
+
r2_score,
|
65 |
+
root_mean_squared_error,
|
66 |
+
root_mean_squared_log_error,
|
67 |
+
)
|
68 |
+
from ._scorer import check_scoring, get_scorer, get_scorer_names, make_scorer
|
69 |
+
from .cluster import (
|
70 |
+
adjusted_mutual_info_score,
|
71 |
+
adjusted_rand_score,
|
72 |
+
calinski_harabasz_score,
|
73 |
+
completeness_score,
|
74 |
+
consensus_score,
|
75 |
+
davies_bouldin_score,
|
76 |
+
fowlkes_mallows_score,
|
77 |
+
homogeneity_completeness_v_measure,
|
78 |
+
homogeneity_score,
|
79 |
+
mutual_info_score,
|
80 |
+
normalized_mutual_info_score,
|
81 |
+
pair_confusion_matrix,
|
82 |
+
rand_score,
|
83 |
+
silhouette_samples,
|
84 |
+
silhouette_score,
|
85 |
+
v_measure_score,
|
86 |
+
)
|
87 |
+
from .pairwise import (
|
88 |
+
euclidean_distances,
|
89 |
+
nan_euclidean_distances,
|
90 |
+
pairwise_distances,
|
91 |
+
pairwise_distances_argmin,
|
92 |
+
pairwise_distances_argmin_min,
|
93 |
+
pairwise_distances_chunked,
|
94 |
+
pairwise_kernels,
|
95 |
+
)
|
96 |
+
|
97 |
+
__all__ = [
|
98 |
+
"accuracy_score",
|
99 |
+
"adjusted_mutual_info_score",
|
100 |
+
"adjusted_rand_score",
|
101 |
+
"auc",
|
102 |
+
"average_precision_score",
|
103 |
+
"balanced_accuracy_score",
|
104 |
+
"calinski_harabasz_score",
|
105 |
+
"check_scoring",
|
106 |
+
"class_likelihood_ratios",
|
107 |
+
"classification_report",
|
108 |
+
"cluster",
|
109 |
+
"cohen_kappa_score",
|
110 |
+
"completeness_score",
|
111 |
+
"ConfusionMatrixDisplay",
|
112 |
+
"confusion_matrix",
|
113 |
+
"consensus_score",
|
114 |
+
"coverage_error",
|
115 |
+
"d2_tweedie_score",
|
116 |
+
"d2_absolute_error_score",
|
117 |
+
"d2_pinball_score",
|
118 |
+
"dcg_score",
|
119 |
+
"davies_bouldin_score",
|
120 |
+
"DetCurveDisplay",
|
121 |
+
"det_curve",
|
122 |
+
"DistanceMetric",
|
123 |
+
"euclidean_distances",
|
124 |
+
"explained_variance_score",
|
125 |
+
"f1_score",
|
126 |
+
"fbeta_score",
|
127 |
+
"fowlkes_mallows_score",
|
128 |
+
"get_scorer",
|
129 |
+
"hamming_loss",
|
130 |
+
"hinge_loss",
|
131 |
+
"homogeneity_completeness_v_measure",
|
132 |
+
"homogeneity_score",
|
133 |
+
"jaccard_score",
|
134 |
+
"label_ranking_average_precision_score",
|
135 |
+
"label_ranking_loss",
|
136 |
+
"log_loss",
|
137 |
+
"make_scorer",
|
138 |
+
"nan_euclidean_distances",
|
139 |
+
"matthews_corrcoef",
|
140 |
+
"max_error",
|
141 |
+
"mean_absolute_error",
|
142 |
+
"mean_squared_error",
|
143 |
+
"mean_squared_log_error",
|
144 |
+
"mean_pinball_loss",
|
145 |
+
"mean_poisson_deviance",
|
146 |
+
"mean_gamma_deviance",
|
147 |
+
"mean_tweedie_deviance",
|
148 |
+
"median_absolute_error",
|
149 |
+
"mean_absolute_percentage_error",
|
150 |
+
"multilabel_confusion_matrix",
|
151 |
+
"mutual_info_score",
|
152 |
+
"ndcg_score",
|
153 |
+
"normalized_mutual_info_score",
|
154 |
+
"pair_confusion_matrix",
|
155 |
+
"pairwise_distances",
|
156 |
+
"pairwise_distances_argmin",
|
157 |
+
"pairwise_distances_argmin_min",
|
158 |
+
"pairwise_distances_chunked",
|
159 |
+
"pairwise_kernels",
|
160 |
+
"PrecisionRecallDisplay",
|
161 |
+
"precision_recall_curve",
|
162 |
+
"precision_recall_fscore_support",
|
163 |
+
"precision_score",
|
164 |
+
"PredictionErrorDisplay",
|
165 |
+
"r2_score",
|
166 |
+
"rand_score",
|
167 |
+
"recall_score",
|
168 |
+
"RocCurveDisplay",
|
169 |
+
"roc_auc_score",
|
170 |
+
"roc_curve",
|
171 |
+
"root_mean_squared_log_error",
|
172 |
+
"root_mean_squared_error",
|
173 |
+
"get_scorer_names",
|
174 |
+
"silhouette_samples",
|
175 |
+
"silhouette_score",
|
176 |
+
"top_k_accuracy_score",
|
177 |
+
"v_measure_score",
|
178 |
+
"zero_one_loss",
|
179 |
+
"brier_score_loss",
|
180 |
+
]
|
venv/lib/python3.10/site-packages/sklearn/metrics/_base.py
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Common code for all metrics.
|
3 |
+
|
4 |
+
"""
|
5 |
+
# Authors: Alexandre Gramfort <[email protected]>
|
6 |
+
# Mathieu Blondel <[email protected]>
|
7 |
+
# Olivier Grisel <[email protected]>
|
8 |
+
# Arnaud Joly <[email protected]>
|
9 |
+
# Jochen Wersdorfer <[email protected]>
|
10 |
+
# Lars Buitinck
|
11 |
+
# Joel Nothman <[email protected]>
|
12 |
+
# Noel Dawe <[email protected]>
|
13 |
+
# License: BSD 3 clause
|
14 |
+
|
15 |
+
from itertools import combinations
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
|
19 |
+
from ..utils import check_array, check_consistent_length
|
20 |
+
from ..utils.multiclass import type_of_target
|
21 |
+
|
22 |
+
|
23 |
+
def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None):
|
24 |
+
"""Average a binary metric for multilabel classification.
|
25 |
+
|
26 |
+
Parameters
|
27 |
+
----------
|
28 |
+
y_true : array, shape = [n_samples] or [n_samples, n_classes]
|
29 |
+
True binary labels in binary label indicators.
|
30 |
+
|
31 |
+
y_score : array, shape = [n_samples] or [n_samples, n_classes]
|
32 |
+
Target scores, can either be probability estimates of the positive
|
33 |
+
class, confidence values, or binary decisions.
|
34 |
+
|
35 |
+
average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
|
36 |
+
If ``None``, the scores for each class are returned. Otherwise,
|
37 |
+
this determines the type of averaging performed on the data:
|
38 |
+
|
39 |
+
``'micro'``:
|
40 |
+
Calculate metrics globally by considering each element of the label
|
41 |
+
indicator matrix as a label.
|
42 |
+
``'macro'``:
|
43 |
+
Calculate metrics for each label, and find their unweighted
|
44 |
+
mean. This does not take label imbalance into account.
|
45 |
+
``'weighted'``:
|
46 |
+
Calculate metrics for each label, and find their average, weighted
|
47 |
+
by support (the number of true instances for each label).
|
48 |
+
``'samples'``:
|
49 |
+
Calculate metrics for each instance, and find their average.
|
50 |
+
|
51 |
+
Will be ignored when ``y_true`` is binary.
|
52 |
+
|
53 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
54 |
+
Sample weights.
|
55 |
+
|
56 |
+
binary_metric : callable, returns shape [n_classes]
|
57 |
+
The binary metric function to use.
|
58 |
+
|
59 |
+
Returns
|
60 |
+
-------
|
61 |
+
score : float or array of shape [n_classes]
|
62 |
+
If not ``None``, average the score, else return the score for each
|
63 |
+
classes.
|
64 |
+
|
65 |
+
"""
|
66 |
+
average_options = (None, "micro", "macro", "weighted", "samples")
|
67 |
+
if average not in average_options:
|
68 |
+
raise ValueError("average has to be one of {0}".format(average_options))
|
69 |
+
|
70 |
+
y_type = type_of_target(y_true)
|
71 |
+
if y_type not in ("binary", "multilabel-indicator"):
|
72 |
+
raise ValueError("{0} format is not supported".format(y_type))
|
73 |
+
|
74 |
+
if y_type == "binary":
|
75 |
+
return binary_metric(y_true, y_score, sample_weight=sample_weight)
|
76 |
+
|
77 |
+
check_consistent_length(y_true, y_score, sample_weight)
|
78 |
+
y_true = check_array(y_true)
|
79 |
+
y_score = check_array(y_score)
|
80 |
+
|
81 |
+
not_average_axis = 1
|
82 |
+
score_weight = sample_weight
|
83 |
+
average_weight = None
|
84 |
+
|
85 |
+
if average == "micro":
|
86 |
+
if score_weight is not None:
|
87 |
+
score_weight = np.repeat(score_weight, y_true.shape[1])
|
88 |
+
y_true = y_true.ravel()
|
89 |
+
y_score = y_score.ravel()
|
90 |
+
|
91 |
+
elif average == "weighted":
|
92 |
+
if score_weight is not None:
|
93 |
+
average_weight = np.sum(
|
94 |
+
np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0
|
95 |
+
)
|
96 |
+
else:
|
97 |
+
average_weight = np.sum(y_true, axis=0)
|
98 |
+
if np.isclose(average_weight.sum(), 0.0):
|
99 |
+
return 0
|
100 |
+
|
101 |
+
elif average == "samples":
|
102 |
+
# swap average_weight <-> score_weight
|
103 |
+
average_weight = score_weight
|
104 |
+
score_weight = None
|
105 |
+
not_average_axis = 0
|
106 |
+
|
107 |
+
if y_true.ndim == 1:
|
108 |
+
y_true = y_true.reshape((-1, 1))
|
109 |
+
|
110 |
+
if y_score.ndim == 1:
|
111 |
+
y_score = y_score.reshape((-1, 1))
|
112 |
+
|
113 |
+
n_classes = y_score.shape[not_average_axis]
|
114 |
+
score = np.zeros((n_classes,))
|
115 |
+
for c in range(n_classes):
|
116 |
+
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
|
117 |
+
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
|
118 |
+
score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)
|
119 |
+
|
120 |
+
# Average the results
|
121 |
+
if average is not None:
|
122 |
+
if average_weight is not None:
|
123 |
+
# Scores with 0 weights are forced to be 0, preventing the average
|
124 |
+
# score from being affected by 0-weighted NaN elements.
|
125 |
+
average_weight = np.asarray(average_weight)
|
126 |
+
score[average_weight == 0] = 0
|
127 |
+
return np.average(score, weights=average_weight)
|
128 |
+
else:
|
129 |
+
return score
|
130 |
+
|
131 |
+
|
132 |
+
def _average_multiclass_ovo_score(binary_metric, y_true, y_score, average="macro"):
|
133 |
+
"""Average one-versus-one scores for multiclass classification.
|
134 |
+
|
135 |
+
Uses the binary metric for one-vs-one multiclass classification,
|
136 |
+
where the score is computed according to the Hand & Till (2001) algorithm.
|
137 |
+
|
138 |
+
Parameters
|
139 |
+
----------
|
140 |
+
binary_metric : callable
|
141 |
+
The binary metric function to use that accepts the following as input:
|
142 |
+
y_true_target : array, shape = [n_samples_target]
|
143 |
+
Some sub-array of y_true for a pair of classes designated
|
144 |
+
positive and negative in the one-vs-one scheme.
|
145 |
+
y_score_target : array, shape = [n_samples_target]
|
146 |
+
Scores corresponding to the probability estimates
|
147 |
+
of a sample belonging to the designated positive class label
|
148 |
+
|
149 |
+
y_true : array-like of shape (n_samples,)
|
150 |
+
True multiclass labels.
|
151 |
+
|
152 |
+
y_score : array-like of shape (n_samples, n_classes)
|
153 |
+
Target scores corresponding to probability estimates of a sample
|
154 |
+
belonging to a particular class.
|
155 |
+
|
156 |
+
average : {'macro', 'weighted'}, default='macro'
|
157 |
+
Determines the type of averaging performed on the pairwise binary
|
158 |
+
metric scores:
|
159 |
+
``'macro'``:
|
160 |
+
Calculate metrics for each label, and find their unweighted
|
161 |
+
mean. This does not take label imbalance into account. Classes
|
162 |
+
are assumed to be uniformly distributed.
|
163 |
+
``'weighted'``:
|
164 |
+
Calculate metrics for each label, taking into account the
|
165 |
+
prevalence of the classes.
|
166 |
+
|
167 |
+
Returns
|
168 |
+
-------
|
169 |
+
score : float
|
170 |
+
Average of the pairwise binary metric scores.
|
171 |
+
"""
|
172 |
+
check_consistent_length(y_true, y_score)
|
173 |
+
|
174 |
+
y_true_unique = np.unique(y_true)
|
175 |
+
n_classes = y_true_unique.shape[0]
|
176 |
+
n_pairs = n_classes * (n_classes - 1) // 2
|
177 |
+
pair_scores = np.empty(n_pairs)
|
178 |
+
|
179 |
+
is_weighted = average == "weighted"
|
180 |
+
prevalence = np.empty(n_pairs) if is_weighted else None
|
181 |
+
|
182 |
+
# Compute scores treating a as positive class and b as negative class,
|
183 |
+
# then b as positive class and a as negative class
|
184 |
+
for ix, (a, b) in enumerate(combinations(y_true_unique, 2)):
|
185 |
+
a_mask = y_true == a
|
186 |
+
b_mask = y_true == b
|
187 |
+
ab_mask = np.logical_or(a_mask, b_mask)
|
188 |
+
|
189 |
+
if is_weighted:
|
190 |
+
prevalence[ix] = np.average(ab_mask)
|
191 |
+
|
192 |
+
a_true = a_mask[ab_mask]
|
193 |
+
b_true = b_mask[ab_mask]
|
194 |
+
|
195 |
+
a_true_score = binary_metric(a_true, y_score[ab_mask, a])
|
196 |
+
b_true_score = binary_metric(b_true, y_score[ab_mask, b])
|
197 |
+
pair_scores[ix] = (a_true_score + b_true_score) / 2
|
198 |
+
|
199 |
+
return np.average(pair_scores, weights=prevalence)
|
venv/lib/python3.10/site-packages/sklearn/metrics/_classification.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (721 kB). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.pxd
ADDED
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# WARNING: Do not edit this file directly.
|
2 |
+
# It is automatically generated from 'sklearn/metrics/_dist_metrics.pxd.tp'.
|
3 |
+
# Changes must be made there.
|
4 |
+
|
5 |
+
from libc.math cimport sqrt, exp
|
6 |
+
|
7 |
+
from ..utils._typedefs cimport float64_t, float32_t, int32_t, intp_t
|
8 |
+
|
9 |
+
cdef class DistanceMetric:
|
10 |
+
pass
|
11 |
+
|
12 |
+
######################################################################
|
13 |
+
# Inline distance functions
|
14 |
+
#
|
15 |
+
# We use these for the default (euclidean) case so that they can be
|
16 |
+
# inlined. This leads to faster computation for the most common case
|
17 |
+
cdef inline float64_t euclidean_dist64(
|
18 |
+
const float64_t* x1,
|
19 |
+
const float64_t* x2,
|
20 |
+
intp_t size,
|
21 |
+
) except -1 nogil:
|
22 |
+
cdef float64_t tmp, d=0
|
23 |
+
cdef intp_t j
|
24 |
+
for j in range(size):
|
25 |
+
tmp = <float64_t> (x1[j] - x2[j])
|
26 |
+
d += tmp * tmp
|
27 |
+
return sqrt(d)
|
28 |
+
|
29 |
+
|
30 |
+
cdef inline float64_t euclidean_rdist64(
|
31 |
+
const float64_t* x1,
|
32 |
+
const float64_t* x2,
|
33 |
+
intp_t size,
|
34 |
+
) except -1 nogil:
|
35 |
+
cdef float64_t tmp, d=0
|
36 |
+
cdef intp_t j
|
37 |
+
for j in range(size):
|
38 |
+
tmp = <float64_t>(x1[j] - x2[j])
|
39 |
+
d += tmp * tmp
|
40 |
+
return d
|
41 |
+
|
42 |
+
|
43 |
+
cdef inline float64_t euclidean_dist_to_rdist64(const float64_t dist) except -1 nogil:
|
44 |
+
return dist * dist
|
45 |
+
|
46 |
+
|
47 |
+
cdef inline float64_t euclidean_rdist_to_dist64(const float64_t dist) except -1 nogil:
|
48 |
+
return sqrt(dist)
|
49 |
+
|
50 |
+
|
51 |
+
######################################################################
|
52 |
+
# DistanceMetric64 base class
|
53 |
+
cdef class DistanceMetric64(DistanceMetric):
|
54 |
+
# The following attributes are required for a few of the subclasses.
|
55 |
+
# we must define them here so that cython's limited polymorphism will work.
|
56 |
+
# Because we don't expect to instantiate a lot of these objects, the
|
57 |
+
# extra memory overhead of this setup should not be an issue.
|
58 |
+
cdef float64_t p
|
59 |
+
cdef const float64_t[::1] vec
|
60 |
+
cdef const float64_t[:, ::1] mat
|
61 |
+
cdef intp_t size
|
62 |
+
cdef object func
|
63 |
+
cdef object kwargs
|
64 |
+
|
65 |
+
cdef float64_t dist(
|
66 |
+
self,
|
67 |
+
const float64_t* x1,
|
68 |
+
const float64_t* x2,
|
69 |
+
intp_t size,
|
70 |
+
) except -1 nogil
|
71 |
+
|
72 |
+
cdef float64_t rdist(
|
73 |
+
self,
|
74 |
+
const float64_t* x1,
|
75 |
+
const float64_t* x2,
|
76 |
+
intp_t size,
|
77 |
+
) except -1 nogil
|
78 |
+
|
79 |
+
cdef float64_t dist_csr(
|
80 |
+
self,
|
81 |
+
const float64_t* x1_data,
|
82 |
+
const int32_t* x1_indices,
|
83 |
+
const float64_t* x2_data,
|
84 |
+
const int32_t* x2_indices,
|
85 |
+
const int32_t x1_start,
|
86 |
+
const int32_t x1_end,
|
87 |
+
const int32_t x2_start,
|
88 |
+
const int32_t x2_end,
|
89 |
+
const intp_t size,
|
90 |
+
) except -1 nogil
|
91 |
+
|
92 |
+
cdef float64_t rdist_csr(
|
93 |
+
self,
|
94 |
+
const float64_t* x1_data,
|
95 |
+
const int32_t* x1_indices,
|
96 |
+
const float64_t* x2_data,
|
97 |
+
const int32_t* x2_indices,
|
98 |
+
const int32_t x1_start,
|
99 |
+
const int32_t x1_end,
|
100 |
+
const int32_t x2_start,
|
101 |
+
const int32_t x2_end,
|
102 |
+
const intp_t size,
|
103 |
+
) except -1 nogil
|
104 |
+
|
105 |
+
cdef int pdist(
|
106 |
+
self,
|
107 |
+
const float64_t[:, ::1] X,
|
108 |
+
float64_t[:, ::1] D,
|
109 |
+
) except -1
|
110 |
+
|
111 |
+
cdef int cdist(
|
112 |
+
self,
|
113 |
+
const float64_t[:, ::1] X,
|
114 |
+
const float64_t[:, ::1] Y,
|
115 |
+
float64_t[:, ::1] D,
|
116 |
+
) except -1
|
117 |
+
|
118 |
+
cdef int pdist_csr(
|
119 |
+
self,
|
120 |
+
const float64_t* x1_data,
|
121 |
+
const int32_t[::1] x1_indices,
|
122 |
+
const int32_t[::1] x1_indptr,
|
123 |
+
const intp_t size,
|
124 |
+
float64_t[:, ::1] D,
|
125 |
+
) except -1 nogil
|
126 |
+
|
127 |
+
cdef int cdist_csr(
|
128 |
+
self,
|
129 |
+
const float64_t* x1_data,
|
130 |
+
const int32_t[::1] x1_indices,
|
131 |
+
const int32_t[::1] x1_indptr,
|
132 |
+
const float64_t* x2_data,
|
133 |
+
const int32_t[::1] x2_indices,
|
134 |
+
const int32_t[::1] x2_indptr,
|
135 |
+
const intp_t size,
|
136 |
+
float64_t[:, ::1] D,
|
137 |
+
) except -1 nogil
|
138 |
+
|
139 |
+
cdef float64_t _rdist_to_dist(self, float64_t rdist) except -1 nogil
|
140 |
+
|
141 |
+
cdef float64_t _dist_to_rdist(self, float64_t dist) except -1 nogil
|
142 |
+
|
143 |
+
######################################################################
|
144 |
+
# Inline distance functions
|
145 |
+
#
|
146 |
+
# We use these for the default (euclidean) case so that they can be
|
147 |
+
# inlined. This leads to faster computation for the most common case
|
148 |
+
cdef inline float64_t euclidean_dist32(
|
149 |
+
const float32_t* x1,
|
150 |
+
const float32_t* x2,
|
151 |
+
intp_t size,
|
152 |
+
) except -1 nogil:
|
153 |
+
cdef float64_t tmp, d=0
|
154 |
+
cdef intp_t j
|
155 |
+
for j in range(size):
|
156 |
+
tmp = <float64_t> (x1[j] - x2[j])
|
157 |
+
d += tmp * tmp
|
158 |
+
return sqrt(d)
|
159 |
+
|
160 |
+
|
161 |
+
cdef inline float64_t euclidean_rdist32(
|
162 |
+
const float32_t* x1,
|
163 |
+
const float32_t* x2,
|
164 |
+
intp_t size,
|
165 |
+
) except -1 nogil:
|
166 |
+
cdef float64_t tmp, d=0
|
167 |
+
cdef intp_t j
|
168 |
+
for j in range(size):
|
169 |
+
tmp = <float64_t>(x1[j] - x2[j])
|
170 |
+
d += tmp * tmp
|
171 |
+
return d
|
172 |
+
|
173 |
+
|
174 |
+
cdef inline float64_t euclidean_dist_to_rdist32(const float32_t dist) except -1 nogil:
|
175 |
+
return dist * dist
|
176 |
+
|
177 |
+
|
178 |
+
cdef inline float64_t euclidean_rdist_to_dist32(const float32_t dist) except -1 nogil:
|
179 |
+
return sqrt(dist)
|
180 |
+
|
181 |
+
|
182 |
+
######################################################################
|
183 |
+
# DistanceMetric32 base class
|
184 |
+
cdef class DistanceMetric32(DistanceMetric):
|
185 |
+
# The following attributes are required for a few of the subclasses.
|
186 |
+
# we must define them here so that cython's limited polymorphism will work.
|
187 |
+
# Because we don't expect to instantiate a lot of these objects, the
|
188 |
+
# extra memory overhead of this setup should not be an issue.
|
189 |
+
cdef float64_t p
|
190 |
+
cdef const float64_t[::1] vec
|
191 |
+
cdef const float64_t[:, ::1] mat
|
192 |
+
cdef intp_t size
|
193 |
+
cdef object func
|
194 |
+
cdef object kwargs
|
195 |
+
|
196 |
+
cdef float32_t dist(
|
197 |
+
self,
|
198 |
+
const float32_t* x1,
|
199 |
+
const float32_t* x2,
|
200 |
+
intp_t size,
|
201 |
+
) except -1 nogil
|
202 |
+
|
203 |
+
cdef float32_t rdist(
|
204 |
+
self,
|
205 |
+
const float32_t* x1,
|
206 |
+
const float32_t* x2,
|
207 |
+
intp_t size,
|
208 |
+
) except -1 nogil
|
209 |
+
|
210 |
+
cdef float32_t dist_csr(
|
211 |
+
self,
|
212 |
+
const float32_t* x1_data,
|
213 |
+
const int32_t* x1_indices,
|
214 |
+
const float32_t* x2_data,
|
215 |
+
const int32_t* x2_indices,
|
216 |
+
const int32_t x1_start,
|
217 |
+
const int32_t x1_end,
|
218 |
+
const int32_t x2_start,
|
219 |
+
const int32_t x2_end,
|
220 |
+
const intp_t size,
|
221 |
+
) except -1 nogil
|
222 |
+
|
223 |
+
cdef float32_t rdist_csr(
|
224 |
+
self,
|
225 |
+
const float32_t* x1_data,
|
226 |
+
const int32_t* x1_indices,
|
227 |
+
const float32_t* x2_data,
|
228 |
+
const int32_t* x2_indices,
|
229 |
+
const int32_t x1_start,
|
230 |
+
const int32_t x1_end,
|
231 |
+
const int32_t x2_start,
|
232 |
+
const int32_t x2_end,
|
233 |
+
const intp_t size,
|
234 |
+
) except -1 nogil
|
235 |
+
|
236 |
+
cdef int pdist(
|
237 |
+
self,
|
238 |
+
const float32_t[:, ::1] X,
|
239 |
+
float32_t[:, ::1] D,
|
240 |
+
) except -1
|
241 |
+
|
242 |
+
cdef int cdist(
|
243 |
+
self,
|
244 |
+
const float32_t[:, ::1] X,
|
245 |
+
const float32_t[:, ::1] Y,
|
246 |
+
float32_t[:, ::1] D,
|
247 |
+
) except -1
|
248 |
+
|
249 |
+
cdef int pdist_csr(
|
250 |
+
self,
|
251 |
+
const float32_t* x1_data,
|
252 |
+
const int32_t[::1] x1_indices,
|
253 |
+
const int32_t[::1] x1_indptr,
|
254 |
+
const intp_t size,
|
255 |
+
float32_t[:, ::1] D,
|
256 |
+
) except -1 nogil
|
257 |
+
|
258 |
+
cdef int cdist_csr(
|
259 |
+
self,
|
260 |
+
const float32_t* x1_data,
|
261 |
+
const int32_t[::1] x1_indices,
|
262 |
+
const int32_t[::1] x1_indptr,
|
263 |
+
const float32_t* x2_data,
|
264 |
+
const int32_t[::1] x2_indices,
|
265 |
+
const int32_t[::1] x2_indptr,
|
266 |
+
const intp_t size,
|
267 |
+
float32_t[:, ::1] D,
|
268 |
+
) except -1 nogil
|
269 |
+
|
270 |
+
cdef float32_t _rdist_to_dist(self, float32_t rdist) except -1 nogil
|
271 |
+
|
272 |
+
cdef float32_t _dist_to_rdist(self, float32_t dist) except -1 nogil
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__init__.py
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# Pairwise Distances Reductions
|
3 |
+
# =============================
|
4 |
+
#
|
5 |
+
# Authors: The scikit-learn developers.
|
6 |
+
# License: BSD 3 clause
|
7 |
+
#
|
8 |
+
# Overview
|
9 |
+
# --------
|
10 |
+
#
|
11 |
+
# This module provides routines to compute pairwise distances between a set
|
12 |
+
# of row vectors of X and another set of row vectors of Y and apply a
|
13 |
+
# reduction on top. The canonical example is the brute-force computation
|
14 |
+
# of the top k nearest neighbors by leveraging the arg-k-min reduction.
|
15 |
+
#
|
16 |
+
# The reduction takes a matrix of pairwise distances between rows of X and Y
|
17 |
+
# as input and outputs an aggregate data-structure for each row of X. The
|
18 |
+
# aggregate values are typically smaller than the number of rows in Y, hence
|
19 |
+
# the term reduction.
|
20 |
+
#
|
21 |
+
# For computational reasons, the reduction are performed on the fly on chunks
|
22 |
+
# of rows of X and Y so as to keep intermediate data-structures in CPU cache
|
23 |
+
# and avoid unnecessary round trips of large distance arrays with the RAM
|
24 |
+
# that would otherwise severely degrade the speed by making the overall
|
25 |
+
# processing memory-bound.
|
26 |
+
#
|
27 |
+
# Finally, the routines follow a generic parallelization template to process
|
28 |
+
# chunks of data with OpenMP loops (via Cython prange), either on rows of X
|
29 |
+
# or rows of Y depending on their respective sizes.
|
30 |
+
#
|
31 |
+
#
|
32 |
+
# Dispatching to specialized implementations
|
33 |
+
# ------------------------------------------
|
34 |
+
#
|
35 |
+
# Dispatchers are meant to be used in the Python code. Under the hood, a
|
36 |
+
# dispatcher must only define the logic to choose at runtime to the correct
|
37 |
+
# dtype-specialized :class:`BaseDistancesReductionDispatcher` implementation based
|
38 |
+
# on the dtype of X and of Y.
|
39 |
+
#
|
40 |
+
#
|
41 |
+
# High-level diagram
|
42 |
+
# ------------------
|
43 |
+
#
|
44 |
+
# Legend:
|
45 |
+
#
|
46 |
+
# A ---⊳ B: A inherits from B
|
47 |
+
# A ---x B: A dispatches to B
|
48 |
+
#
|
49 |
+
#
|
50 |
+
# (base dispatcher)
|
51 |
+
# BaseDistancesReductionDispatcher
|
52 |
+
# ∆
|
53 |
+
# |
|
54 |
+
# |
|
55 |
+
# +------------------+---------------+---------------+------------------+
|
56 |
+
# | | | |
|
57 |
+
# | (dispatcher) (dispatcher) |
|
58 |
+
# | ArgKmin RadiusNeighbors |
|
59 |
+
# | | | |
|
60 |
+
# | | | |
|
61 |
+
# | | (float{32,64} implem.) | |
|
62 |
+
# | | BaseDistancesReduction{32,64} | |
|
63 |
+
# | | ∆ | |
|
64 |
+
# (dispatcher) | | | (dispatcher)
|
65 |
+
# ArgKminClassMode | | | RadiusNeighborsClassMode
|
66 |
+
# | | +----------+----------+ | |
|
67 |
+
# | | | | | |
|
68 |
+
# | | | | | |
|
69 |
+
# | x | | x |
|
70 |
+
# | +-------⊳ ArgKmin{32,64} RadiusNeighbors{32,64} ⊲---+ |
|
71 |
+
# x | | ∆ ∆ | | x
|
72 |
+
# ArgKminClassMode{32,64} | | | | RadiusNeighborsClassMode{32,64}
|
73 |
+
# ===================================== Specializations ============================================
|
74 |
+
# | | | |
|
75 |
+
# | | | |
|
76 |
+
# x | | x
|
77 |
+
# EuclideanArgKmin{32,64} EuclideanRadiusNeighbors{32,64}
|
78 |
+
#
|
79 |
+
#
|
80 |
+
# For instance :class:`ArgKmin` dispatches to:
|
81 |
+
# - :class:`ArgKmin64` if X and Y are two `float64` array-likes
|
82 |
+
# - :class:`ArgKmin32` if X and Y are two `float32` array-likes
|
83 |
+
#
|
84 |
+
# In addition, if the metric parameter is set to "euclidean" or "sqeuclidean",
|
85 |
+
# then some direct subclass of `BaseDistancesReduction{32,64}` further dispatches
|
86 |
+
# to one of their subclass for euclidean-specialized implementation. For instance,
|
87 |
+
# :class:`ArgKmin64` dispatches to :class:`EuclideanArgKmin64`.
|
88 |
+
#
|
89 |
+
# Those Euclidean-specialized implementations relies on optimal implementations of
|
90 |
+
# a decomposition of the squared euclidean distance matrix into a sum of three terms
|
91 |
+
# (see :class:`MiddleTermComputer{32,64}`).
|
92 |
+
#
|
93 |
+
|
94 |
+
from ._dispatcher import (
|
95 |
+
ArgKmin,
|
96 |
+
ArgKminClassMode,
|
97 |
+
BaseDistancesReductionDispatcher,
|
98 |
+
RadiusNeighbors,
|
99 |
+
RadiusNeighborsClassMode,
|
100 |
+
sqeuclidean_row_norms,
|
101 |
+
)
|
102 |
+
|
103 |
+
__all__ = [
|
104 |
+
"BaseDistancesReductionDispatcher",
|
105 |
+
"ArgKmin",
|
106 |
+
"RadiusNeighbors",
|
107 |
+
"ArgKminClassMode",
|
108 |
+
"RadiusNeighborsClassMode",
|
109 |
+
"sqeuclidean_row_norms",
|
110 |
+
]
|
111 |
+
|
112 |
+
# ruff: noqa: E501
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (470 Bytes). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/_dispatcher.cpython-310.pyc
ADDED
Binary file (25.7 kB). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (377 kB). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# WARNING: Do not edit this file directly.
|
2 |
+
# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd.tp'.
|
3 |
+
# Changes must be made there.
|
4 |
+
|
5 |
+
from ...utils._typedefs cimport intp_t, float64_t
|
6 |
+
|
7 |
+
from ._base cimport BaseDistancesReduction64
|
8 |
+
from ._middle_term_computer cimport MiddleTermComputer64
|
9 |
+
|
10 |
+
cdef class ArgKmin64(BaseDistancesReduction64):
|
11 |
+
"""float64 implementation of the ArgKmin."""
|
12 |
+
|
13 |
+
cdef:
|
14 |
+
intp_t k
|
15 |
+
|
16 |
+
intp_t[:, ::1] argkmin_indices
|
17 |
+
float64_t[:, ::1] argkmin_distances
|
18 |
+
|
19 |
+
# Used as array of pointers to private datastructures used in threads.
|
20 |
+
float64_t ** heaps_r_distances_chunks
|
21 |
+
intp_t ** heaps_indices_chunks
|
22 |
+
|
23 |
+
|
24 |
+
cdef class EuclideanArgKmin64(ArgKmin64):
|
25 |
+
"""EuclideanDistance-specialisation of ArgKmin64."""
|
26 |
+
cdef:
|
27 |
+
MiddleTermComputer64 middle_term_computer
|
28 |
+
const float64_t[::1] X_norm_squared
|
29 |
+
const float64_t[::1] Y_norm_squared
|
30 |
+
|
31 |
+
bint use_squared_distances
|
32 |
+
|
33 |
+
from ._base cimport BaseDistancesReduction32
|
34 |
+
from ._middle_term_computer cimport MiddleTermComputer32
|
35 |
+
|
36 |
+
cdef class ArgKmin32(BaseDistancesReduction32):
|
37 |
+
"""float32 implementation of the ArgKmin."""
|
38 |
+
|
39 |
+
cdef:
|
40 |
+
intp_t k
|
41 |
+
|
42 |
+
intp_t[:, ::1] argkmin_indices
|
43 |
+
float64_t[:, ::1] argkmin_distances
|
44 |
+
|
45 |
+
# Used as array of pointers to private datastructures used in threads.
|
46 |
+
float64_t ** heaps_r_distances_chunks
|
47 |
+
intp_t ** heaps_indices_chunks
|
48 |
+
|
49 |
+
|
50 |
+
cdef class EuclideanArgKmin32(ArgKmin32):
|
51 |
+
"""EuclideanDistance-specialisation of ArgKmin32."""
|
52 |
+
cdef:
|
53 |
+
MiddleTermComputer32 middle_term_computer
|
54 |
+
const float64_t[::1] X_norm_squared
|
55 |
+
const float64_t[::1] Y_norm_squared
|
56 |
+
|
57 |
+
bint use_squared_distances
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin_classmode.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (283 kB). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (345 kB). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.pxd
ADDED
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# WARNING: Do not edit this file directly.
|
2 |
+
# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_base.pxd.tp'.
|
3 |
+
# Changes must be made there.
|
4 |
+
|
5 |
+
from cython cimport final
|
6 |
+
|
7 |
+
from ...utils._typedefs cimport intp_t, float64_t
|
8 |
+
|
9 |
+
from ._datasets_pair cimport DatasetsPair64
|
10 |
+
|
11 |
+
|
12 |
+
cpdef float64_t[::1] _sqeuclidean_row_norms64(
|
13 |
+
X,
|
14 |
+
intp_t num_threads,
|
15 |
+
)
|
16 |
+
|
17 |
+
cdef class BaseDistancesReduction64:
|
18 |
+
"""
|
19 |
+
Base float64 implementation template of the pairwise-distances
|
20 |
+
reduction backends.
|
21 |
+
|
22 |
+
Implementations inherit from this template and may override the several
|
23 |
+
defined hooks as needed in order to easily extend functionality with
|
24 |
+
minimal redundant code.
|
25 |
+
"""
|
26 |
+
|
27 |
+
cdef:
|
28 |
+
readonly DatasetsPair64 datasets_pair
|
29 |
+
|
30 |
+
# The number of threads that can be used is stored in effective_n_threads.
|
31 |
+
#
|
32 |
+
# The number of threads to use in the parallelization strategy
|
33 |
+
# (i.e. parallel_on_X or parallel_on_Y) can be smaller than effective_n_threads:
|
34 |
+
# for small datasets, fewer threads might be needed to loop over pair of chunks.
|
35 |
+
#
|
36 |
+
# Hence, the number of threads that _will_ be used for looping over chunks
|
37 |
+
# is stored in chunks_n_threads, allowing solely using what we need.
|
38 |
+
#
|
39 |
+
# Thus, an invariant is:
|
40 |
+
#
|
41 |
+
# chunks_n_threads <= effective_n_threads
|
42 |
+
#
|
43 |
+
intp_t effective_n_threads
|
44 |
+
intp_t chunks_n_threads
|
45 |
+
|
46 |
+
intp_t n_samples_chunk, chunk_size
|
47 |
+
|
48 |
+
intp_t n_samples_X, X_n_samples_chunk, X_n_chunks, X_n_samples_last_chunk
|
49 |
+
intp_t n_samples_Y, Y_n_samples_chunk, Y_n_chunks, Y_n_samples_last_chunk
|
50 |
+
|
51 |
+
bint execute_in_parallel_on_Y
|
52 |
+
|
53 |
+
@final
|
54 |
+
cdef void _parallel_on_X(self) noexcept nogil
|
55 |
+
|
56 |
+
@final
|
57 |
+
cdef void _parallel_on_Y(self) noexcept nogil
|
58 |
+
|
59 |
+
# Placeholder methods which have to be implemented
|
60 |
+
|
61 |
+
cdef void _compute_and_reduce_distances_on_chunks(
|
62 |
+
self,
|
63 |
+
intp_t X_start,
|
64 |
+
intp_t X_end,
|
65 |
+
intp_t Y_start,
|
66 |
+
intp_t Y_end,
|
67 |
+
intp_t thread_num,
|
68 |
+
) noexcept nogil
|
69 |
+
|
70 |
+
|
71 |
+
# Placeholder methods which can be implemented
|
72 |
+
|
73 |
+
cdef void compute_exact_distances(self) noexcept nogil
|
74 |
+
|
75 |
+
cdef void _parallel_on_X_parallel_init(
|
76 |
+
self,
|
77 |
+
intp_t thread_num,
|
78 |
+
) noexcept nogil
|
79 |
+
|
80 |
+
cdef void _parallel_on_X_init_chunk(
|
81 |
+
self,
|
82 |
+
intp_t thread_num,
|
83 |
+
intp_t X_start,
|
84 |
+
intp_t X_end,
|
85 |
+
) noexcept nogil
|
86 |
+
|
87 |
+
cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
|
88 |
+
self,
|
89 |
+
intp_t X_start,
|
90 |
+
intp_t X_end,
|
91 |
+
intp_t Y_start,
|
92 |
+
intp_t Y_end,
|
93 |
+
intp_t thread_num,
|
94 |
+
) noexcept nogil
|
95 |
+
|
96 |
+
cdef void _parallel_on_X_prange_iter_finalize(
|
97 |
+
self,
|
98 |
+
intp_t thread_num,
|
99 |
+
intp_t X_start,
|
100 |
+
intp_t X_end,
|
101 |
+
) noexcept nogil
|
102 |
+
|
103 |
+
cdef void _parallel_on_X_parallel_finalize(
|
104 |
+
self,
|
105 |
+
intp_t thread_num
|
106 |
+
) noexcept nogil
|
107 |
+
|
108 |
+
cdef void _parallel_on_Y_init(
|
109 |
+
self,
|
110 |
+
) noexcept nogil
|
111 |
+
|
112 |
+
cdef void _parallel_on_Y_parallel_init(
|
113 |
+
self,
|
114 |
+
intp_t thread_num,
|
115 |
+
intp_t X_start,
|
116 |
+
intp_t X_end,
|
117 |
+
) noexcept nogil
|
118 |
+
|
119 |
+
cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
|
120 |
+
self,
|
121 |
+
intp_t X_start,
|
122 |
+
intp_t X_end,
|
123 |
+
intp_t Y_start,
|
124 |
+
intp_t Y_end,
|
125 |
+
intp_t thread_num,
|
126 |
+
) noexcept nogil
|
127 |
+
|
128 |
+
cdef void _parallel_on_Y_synchronize(
|
129 |
+
self,
|
130 |
+
intp_t X_start,
|
131 |
+
intp_t X_end,
|
132 |
+
) noexcept nogil
|
133 |
+
|
134 |
+
cdef void _parallel_on_Y_finalize(
|
135 |
+
self,
|
136 |
+
) noexcept nogil
|
137 |
+
|
138 |
+
from ._datasets_pair cimport DatasetsPair32
|
139 |
+
|
140 |
+
|
141 |
+
cpdef float64_t[::1] _sqeuclidean_row_norms32(
|
142 |
+
X,
|
143 |
+
intp_t num_threads,
|
144 |
+
)
|
145 |
+
|
146 |
+
cdef class BaseDistancesReduction32:
|
147 |
+
"""
|
148 |
+
Base float32 implementation template of the pairwise-distances
|
149 |
+
reduction backends.
|
150 |
+
|
151 |
+
Implementations inherit from this template and may override the several
|
152 |
+
defined hooks as needed in order to easily extend functionality with
|
153 |
+
minimal redundant code.
|
154 |
+
"""
|
155 |
+
|
156 |
+
cdef:
|
157 |
+
readonly DatasetsPair32 datasets_pair
|
158 |
+
|
159 |
+
# The number of threads that can be used is stored in effective_n_threads.
|
160 |
+
#
|
161 |
+
# The number of threads to use in the parallelization strategy
|
162 |
+
# (i.e. parallel_on_X or parallel_on_Y) can be smaller than effective_n_threads:
|
163 |
+
# for small datasets, fewer threads might be needed to loop over pair of chunks.
|
164 |
+
#
|
165 |
+
# Hence, the number of threads that _will_ be used for looping over chunks
|
166 |
+
# is stored in chunks_n_threads, allowing solely using what we need.
|
167 |
+
#
|
168 |
+
# Thus, an invariant is:
|
169 |
+
#
|
170 |
+
# chunks_n_threads <= effective_n_threads
|
171 |
+
#
|
172 |
+
intp_t effective_n_threads
|
173 |
+
intp_t chunks_n_threads
|
174 |
+
|
175 |
+
intp_t n_samples_chunk, chunk_size
|
176 |
+
|
177 |
+
intp_t n_samples_X, X_n_samples_chunk, X_n_chunks, X_n_samples_last_chunk
|
178 |
+
intp_t n_samples_Y, Y_n_samples_chunk, Y_n_chunks, Y_n_samples_last_chunk
|
179 |
+
|
180 |
+
bint execute_in_parallel_on_Y
|
181 |
+
|
182 |
+
@final
|
183 |
+
cdef void _parallel_on_X(self) noexcept nogil
|
184 |
+
|
185 |
+
@final
|
186 |
+
cdef void _parallel_on_Y(self) noexcept nogil
|
187 |
+
|
188 |
+
# Placeholder methods which have to be implemented
|
189 |
+
|
190 |
+
cdef void _compute_and_reduce_distances_on_chunks(
|
191 |
+
self,
|
192 |
+
intp_t X_start,
|
193 |
+
intp_t X_end,
|
194 |
+
intp_t Y_start,
|
195 |
+
intp_t Y_end,
|
196 |
+
intp_t thread_num,
|
197 |
+
) noexcept nogil
|
198 |
+
|
199 |
+
|
200 |
+
# Placeholder methods which can be implemented
|
201 |
+
|
202 |
+
cdef void compute_exact_distances(self) noexcept nogil
|
203 |
+
|
204 |
+
cdef void _parallel_on_X_parallel_init(
|
205 |
+
self,
|
206 |
+
intp_t thread_num,
|
207 |
+
) noexcept nogil
|
208 |
+
|
209 |
+
cdef void _parallel_on_X_init_chunk(
|
210 |
+
self,
|
211 |
+
intp_t thread_num,
|
212 |
+
intp_t X_start,
|
213 |
+
intp_t X_end,
|
214 |
+
) noexcept nogil
|
215 |
+
|
216 |
+
cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
|
217 |
+
self,
|
218 |
+
intp_t X_start,
|
219 |
+
intp_t X_end,
|
220 |
+
intp_t Y_start,
|
221 |
+
intp_t Y_end,
|
222 |
+
intp_t thread_num,
|
223 |
+
) noexcept nogil
|
224 |
+
|
225 |
+
cdef void _parallel_on_X_prange_iter_finalize(
|
226 |
+
self,
|
227 |
+
intp_t thread_num,
|
228 |
+
intp_t X_start,
|
229 |
+
intp_t X_end,
|
230 |
+
) noexcept nogil
|
231 |
+
|
232 |
+
cdef void _parallel_on_X_parallel_finalize(
|
233 |
+
self,
|
234 |
+
intp_t thread_num
|
235 |
+
) noexcept nogil
|
236 |
+
|
237 |
+
cdef void _parallel_on_Y_init(
|
238 |
+
self,
|
239 |
+
) noexcept nogil
|
240 |
+
|
241 |
+
cdef void _parallel_on_Y_parallel_init(
|
242 |
+
self,
|
243 |
+
intp_t thread_num,
|
244 |
+
intp_t X_start,
|
245 |
+
intp_t X_end,
|
246 |
+
) noexcept nogil
|
247 |
+
|
248 |
+
cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
|
249 |
+
self,
|
250 |
+
intp_t X_start,
|
251 |
+
intp_t X_end,
|
252 |
+
intp_t Y_start,
|
253 |
+
intp_t Y_end,
|
254 |
+
intp_t thread_num,
|
255 |
+
) noexcept nogil
|
256 |
+
|
257 |
+
cdef void _parallel_on_Y_synchronize(
|
258 |
+
self,
|
259 |
+
intp_t X_start,
|
260 |
+
intp_t X_end,
|
261 |
+
) noexcept nogil
|
262 |
+
|
263 |
+
cdef void _parallel_on_Y_finalize(
|
264 |
+
self,
|
265 |
+
) noexcept nogil
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_classmode.pxd
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
cpdef enum WeightingStrategy:
|
2 |
+
uniform = 0
|
3 |
+
# TODO: Implement the following options in weighted_histogram_mode
|
4 |
+
distance = 1
|
5 |
+
callable = 2
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (500 kB). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# WARNING: Do not edit this file directly.
|
2 |
+
# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd.tp'.
|
3 |
+
# Changes must be made there.
|
4 |
+
|
5 |
+
from ...utils._typedefs cimport float64_t, float32_t, int32_t, intp_t
|
6 |
+
from ...metrics._dist_metrics cimport DistanceMetric64, DistanceMetric32, DistanceMetric
|
7 |
+
|
8 |
+
|
9 |
+
cdef class DatasetsPair64:
|
10 |
+
cdef:
|
11 |
+
DistanceMetric64 distance_metric
|
12 |
+
intp_t n_features
|
13 |
+
|
14 |
+
cdef intp_t n_samples_X(self) noexcept nogil
|
15 |
+
|
16 |
+
cdef intp_t n_samples_Y(self) noexcept nogil
|
17 |
+
|
18 |
+
cdef float64_t dist(self, intp_t i, intp_t j) noexcept nogil
|
19 |
+
|
20 |
+
cdef float64_t surrogate_dist(self, intp_t i, intp_t j) noexcept nogil
|
21 |
+
|
22 |
+
|
23 |
+
cdef class DenseDenseDatasetsPair64(DatasetsPair64):
|
24 |
+
cdef:
|
25 |
+
const float64_t[:, ::1] X
|
26 |
+
const float64_t[:, ::1] Y
|
27 |
+
|
28 |
+
|
29 |
+
cdef class SparseSparseDatasetsPair64(DatasetsPair64):
|
30 |
+
cdef:
|
31 |
+
const float64_t[:] X_data
|
32 |
+
const int32_t[::1] X_indices
|
33 |
+
const int32_t[::1] X_indptr
|
34 |
+
|
35 |
+
const float64_t[:] Y_data
|
36 |
+
const int32_t[::1] Y_indices
|
37 |
+
const int32_t[::1] Y_indptr
|
38 |
+
|
39 |
+
|
40 |
+
cdef class SparseDenseDatasetsPair64(DatasetsPair64):
|
41 |
+
cdef:
|
42 |
+
const float64_t[:] X_data
|
43 |
+
const int32_t[::1] X_indices
|
44 |
+
const int32_t[::1] X_indptr
|
45 |
+
|
46 |
+
const float64_t[:] Y_data
|
47 |
+
const int32_t[::1] Y_indices
|
48 |
+
intp_t n_Y
|
49 |
+
|
50 |
+
|
51 |
+
cdef class DenseSparseDatasetsPair64(DatasetsPair64):
|
52 |
+
cdef:
|
53 |
+
# As distance metrics are commutative, we can simply rely
|
54 |
+
# on the implementation of SparseDenseDatasetsPair and
|
55 |
+
# swap arguments.
|
56 |
+
DatasetsPair64 datasets_pair
|
57 |
+
|
58 |
+
|
59 |
+
cdef class DatasetsPair32:
|
60 |
+
cdef:
|
61 |
+
DistanceMetric32 distance_metric
|
62 |
+
intp_t n_features
|
63 |
+
|
64 |
+
cdef intp_t n_samples_X(self) noexcept nogil
|
65 |
+
|
66 |
+
cdef intp_t n_samples_Y(self) noexcept nogil
|
67 |
+
|
68 |
+
cdef float64_t dist(self, intp_t i, intp_t j) noexcept nogil
|
69 |
+
|
70 |
+
cdef float64_t surrogate_dist(self, intp_t i, intp_t j) noexcept nogil
|
71 |
+
|
72 |
+
|
73 |
+
cdef class DenseDenseDatasetsPair32(DatasetsPair32):
|
74 |
+
cdef:
|
75 |
+
const float32_t[:, ::1] X
|
76 |
+
const float32_t[:, ::1] Y
|
77 |
+
|
78 |
+
|
79 |
+
cdef class SparseSparseDatasetsPair32(DatasetsPair32):
|
80 |
+
cdef:
|
81 |
+
const float32_t[:] X_data
|
82 |
+
const int32_t[::1] X_indices
|
83 |
+
const int32_t[::1] X_indptr
|
84 |
+
|
85 |
+
const float32_t[:] Y_data
|
86 |
+
const int32_t[::1] Y_indices
|
87 |
+
const int32_t[::1] Y_indptr
|
88 |
+
|
89 |
+
|
90 |
+
cdef class SparseDenseDatasetsPair32(DatasetsPair32):
|
91 |
+
cdef:
|
92 |
+
const float32_t[:] X_data
|
93 |
+
const int32_t[::1] X_indices
|
94 |
+
const int32_t[::1] X_indptr
|
95 |
+
|
96 |
+
const float32_t[:] Y_data
|
97 |
+
const int32_t[::1] Y_indices
|
98 |
+
intp_t n_Y
|
99 |
+
|
100 |
+
|
101 |
+
cdef class DenseSparseDatasetsPair32(DatasetsPair32):
|
102 |
+
cdef:
|
103 |
+
# As distance metrics are commutative, we can simply rely
|
104 |
+
# on the implementation of SparseDenseDatasetsPair and
|
105 |
+
# swap arguments.
|
106 |
+
DatasetsPair32 datasets_pair
|
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py
ADDED
@@ -0,0 +1,764 @@
from abc import abstractmethod
from typing import List

import numpy as np
from scipy.sparse import issparse

from ... import get_config
from .._dist_metrics import (
    BOOL_METRICS,
    METRIC_MAPPING64,
    DistanceMetric,
)
from ._argkmin import (
    ArgKmin32,
    ArgKmin64,
)
from ._argkmin_classmode import (
    ArgKminClassMode32,
    ArgKminClassMode64,
)
from ._base import _sqeuclidean_row_norms32, _sqeuclidean_row_norms64
from ._radius_neighbors import (
    RadiusNeighbors32,
    RadiusNeighbors64,
)
from ._radius_neighbors_classmode import (
    RadiusNeighborsClassMode32,
    RadiusNeighborsClassMode64,
)


def sqeuclidean_row_norms(X, num_threads):
    """Compute the squared euclidean norm of the rows of X in parallel.

    Parameters
    ----------
    X : ndarray or CSR matrix of shape (n_samples, n_features)
        Input data. Must be c-contiguous.

    num_threads : int
        The number of OpenMP threads to use.

    Returns
    -------
    sqeuclidean_row_norms : ndarray of shape (n_samples,)
        Arrays containing the squared euclidean norm of each row of X.
    """
    if X.dtype == np.float64:
        return np.asarray(_sqeuclidean_row_norms64(X, num_threads))
    if X.dtype == np.float32:
        return np.asarray(_sqeuclidean_row_norms32(X, num_threads))

    raise ValueError(
        "Only float64 or float32 datasets are supported at this time, "
        f"got: X.dtype={X.dtype}."
    )


class BaseDistancesReductionDispatcher:
    """Abstract base dispatcher for pairwise distance computation & reduction.

    Each dispatcher extending the base :class:`BaseDistancesReductionDispatcher`
    dispatcher must implement the :meth:`compute` classmethod.
    """

    @classmethod
    def valid_metrics(cls) -> List[str]:
        excluded = {
            # PyFunc cannot be supported because it necessitates interacting with
            # the CPython interpreter to call user defined functions.
            "pyfunc",
            "mahalanobis",  # is numerically unstable
            # In order to support discrete distance metrics, we need to have a
            # stable simultaneous sort which preserves the order of the indices
            # because there generally is a lot of occurrences for a given values
            # of distances in this case.
            # TODO: implement a stable simultaneous_sort.
            "hamming",
            *BOOL_METRICS,
        }
        return sorted(({"sqeuclidean"} | set(METRIC_MAPPING64.keys())) - excluded)

    @classmethod
    def is_usable_for(cls, X, Y, metric) -> bool:
        """Return True if the dispatcher can be used for the
        given parameters.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
            Input data.

        Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features)
            Input data.

        metric : str, default='euclidean'
            The distance metric to use.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.

        Returns
        -------
        True if the dispatcher can be used, else False.
        """

        # FIXME: the current Cython implementation is too slow for a large number of
        # features. We temporarily disable it to fallback on SciPy's implementation.
        # See: https://github.com/scikit-learn/scikit-learn/issues/28191
        if (
            issparse(X)
            and issparse(Y)
            and isinstance(metric, str)
            and "euclidean" in metric
        ):
            return False

        def is_numpy_c_ordered(X):
            return hasattr(X, "flags") and getattr(X.flags, "c_contiguous", False)

        def is_valid_sparse_matrix(X):
            return (
                issparse(X)
                and X.format == "csr"
                and
                # TODO: support CSR matrices without non-zeros elements
                X.nnz > 0
                and
                # TODO: support CSR matrices with int64 indices and indptr
                # See: https://github.com/scikit-learn/scikit-learn/issues/23653
                X.indices.dtype == X.indptr.dtype == np.int32
            )

        is_usable = (
            get_config().get("enable_cython_pairwise_dist", True)
            and (is_numpy_c_ordered(X) or is_valid_sparse_matrix(X))
            and (is_numpy_c_ordered(Y) or is_valid_sparse_matrix(Y))
            and X.dtype == Y.dtype
            and X.dtype in (np.float32, np.float64)
            and (metric in cls.valid_metrics() or isinstance(metric, DistanceMetric))
        )

        return is_usable

    @classmethod
    @abstractmethod
    def compute(
        cls,
        X,
        Y,
        **kwargs,
    ):
        """Compute the reduction.

        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.

        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.

        **kwargs : additional parameters for the reduction

        Notes
        -----
        This method is an abstract class method: it has to be implemented
        for all subclasses.
        """


class ArgKmin(BaseDistancesReductionDispatcher):
    """Compute the argkmin of row vectors of X on the ones of Y.

    For each row vector of X, computes the indices of k first the rows
    vectors of Y with the smallest distances.

    ArgKmin is typically used to perform
    bruteforce k-nearest neighbors queries.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def compute(
        cls,
        X,
        Y,
        k,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
        return_distance=False,
    ):
        """Compute the argkmin reduction.

        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.

        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.

        k : int
            The k for the argkmin reduction.

        metric : str, default='euclidean'
            The distance metric to use for argkmin.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.

        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.

        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.

        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.

            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:

            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.

            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.

            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient

            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.

        return_distance : boolean, default=False
            Return distances between each X vector and its
            argkmin if set to True.

        Returns
        -------
        If return_distance=False:
          - argkmin_indices : ndarray of shape (n_samples_X, k)
            Indices of the argkmin for each vector in X.

        If return_distance=True:
          - argkmin_distances : ndarray of shape (n_samples_X, k)
            Distances to the argkmin for each vector in X.
          - argkmin_indices : ndarray of shape (n_samples_X, k)
            Indices of the argkmin for each vector in X.

        Notes
        -----
        This classmethod inspects the arguments values to dispatch to the
        dtype-specialized implementation of :class:`ArgKmin`.

        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        if X.dtype == Y.dtype == np.float64:
            return ArgKmin64.compute(
                X=X,
                Y=Y,
                k=k,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                return_distance=return_distance,
            )

        if X.dtype == Y.dtype == np.float32:
            return ArgKmin32.compute(
                X=X,
                Y=Y,
                k=k,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                return_distance=return_distance,
            )

        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )


class RadiusNeighbors(BaseDistancesReductionDispatcher):
    """Compute radius-based neighbors for two sets of vectors.

    For each row-vector X[i] of the queries X, find all the indices j of
    row-vectors in Y such that:

                        dist(X[i], Y[j]) <= radius

    The distance function `dist` depends on the values of the `metric`
    and `metric_kwargs` parameters.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def compute(
        cls,
        X,
        Y,
        radius,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
        return_distance=False,
        sort_results=False,
    ):
        """Return the results of the reduction for the given arguments.

        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.

        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.

        radius : float
            The radius defining the neighborhood.

        metric : str, default='euclidean'
            The distance metric to use.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.

        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.

        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.

        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.

            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:

            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.

            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.

            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.

            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.

        return_distance : boolean, default=False
            Return distances between each X vector and its neighbors if set to True.

        sort_results : boolean, default=False
            Sort results with respect to distances between each X vector and its
            neighbors if set to True.

        Returns
        -------
        If return_distance=False:
          - neighbors_indices : ndarray of n_samples_X ndarray
            Indices of the neighbors for each vector in X.

        If return_distance=True:
          - neighbors_indices : ndarray of n_samples_X ndarray
            Indices of the neighbors for each vector in X.
          - neighbors_distances : ndarray of n_samples_X ndarray
            Distances to the neighbors for each vector in X.

        Notes
        -----
        This classmethod inspects the arguments values to dispatch to the
        dtype-specialized implementation of :class:`RadiusNeighbors`.

        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        if X.dtype == Y.dtype == np.float64:
            return RadiusNeighbors64.compute(
                X=X,
                Y=Y,
                radius=radius,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                sort_results=sort_results,
                return_distance=return_distance,
            )

        if X.dtype == Y.dtype == np.float32:
            return RadiusNeighbors32.compute(
                X=X,
                Y=Y,
                radius=radius,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                sort_results=sort_results,
                return_distance=return_distance,
            )

        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )


class ArgKminClassMode(BaseDistancesReductionDispatcher):
    """Compute the argkmin of row vectors of X on the ones of Y with labels.

    For each row vector of X, computes the indices of k first the rows
    vectors of Y with the smallest distances. Computes weighted mode of labels.

    ArgKminClassMode is typically used to perform bruteforce k-nearest neighbors
    queries when the weighted mode of the labels for the k-nearest neighbors
    are required, such as in `predict` methods.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def valid_metrics(cls) -> List[str]:
        excluded = {
            # Euclidean is technically usable for ArgKminClassMode
            # but its current implementation would not be competitive.
            # TODO: implement Euclidean specialization using GEMM.
            "euclidean",
            "sqeuclidean",
        }
        return list(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded)

    @classmethod
    def compute(
        cls,
        X,
        Y,
        k,
        weights,
        Y_labels,
        unique_Y_labels,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
    ):
        """Compute the argkmin reduction.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            The input array to be labelled.

        Y : ndarray of shape (n_samples_Y, n_features)
            The input array whose class membership are provided through the
            `Y_labels` parameter.

        k : int
            The number of nearest neighbors to consider.

        weights : ndarray
            The weights applied over the `Y_labels` of `Y` when computing the
            weighted mode of the labels.

        Y_labels : ndarray
            An array containing the index of the class membership of the
            associated samples in `Y`. This is used in labeling `X`.

        unique_Y_labels : ndarray
            An array containing all unique indices contained in the
            corresponding `Y_labels` array.

        metric : str, default='euclidean'
            The distance metric to use. For a list of available metrics, see
            the documentation of :class:`~sklearn.metrics.DistanceMetric`.
            Currently does not support `'precomputed'`.

        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.

        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.

        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.

            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:

            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.

            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.

            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.

            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.

        Returns
        -------
        probabilities : ndarray of shape (n_samples_X, n_classes)
            An array containing the class probabilities for each sample.

        Notes
        -----
        This classmethod is responsible for introspecting the arguments
        values to dispatch to the most appropriate implementation of
        :class:`PairwiseDistancesArgKmin`.

        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        if weights not in {"uniform", "distance"}:
            raise ValueError(
                "Only the 'uniform' or 'distance' weights options are supported"
                f" at this time. Got: {weights=}."
            )
        if X.dtype == Y.dtype == np.float64:
            return ArgKminClassMode64.compute(
                X=X,
                Y=Y,
                k=k,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )

        if X.dtype == Y.dtype == np.float32:
            return ArgKminClassMode32.compute(
                X=X,
                Y=Y,
                k=k,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )

        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )


class RadiusNeighborsClassMode(BaseDistancesReductionDispatcher):
    """Compute radius-based class modes of row vectors of X using the
    those of Y.

    For each row-vector X[i] of the queries X, find all the indices j of
    row-vectors in Y such that:

                        dist(X[i], Y[j]) <= radius

    RadiusNeighborsClassMode is typically used to perform bruteforce
    radius neighbors queries when the weighted mode of the labels for
    the nearest neighbors within the specified radius are required,
    such as in `predict` methods.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def valid_metrics(cls) -> List[str]:
        excluded = {
            # Euclidean is technically usable for RadiusNeighborsClassMode
            # but it would not be competitive.
            # TODO: implement Euclidean specialization using GEMM.
            "euclidean",
            "sqeuclidean",
        }
        return sorted(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded)

    @classmethod
    def compute(
        cls,
        X,
        Y,
        radius,
        weights,
        Y_labels,
        unique_Y_labels,
        outlier_label,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
    ):
        """Return the results of the reduction for the given arguments.
        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            The input array to be labelled.
        Y : ndarray of shape (n_samples_Y, n_features)
            The input array whose class membership is provided through
            the `Y_labels` parameter.
        radius : float
            The radius defining the neighborhood.
        weights : ndarray
            The weights applied to the `Y_labels` when computing the
            weighted mode of the labels.
        Y_labels : ndarray
            An array containing the index of the class membership of the
            associated samples in `Y`. This is used in labeling `X`.
        unique_Y_labels : ndarray
            An array containing all unique class labels.
        outlier_label : int, default=None
            Label for outlier samples (samples with no neighbors in given
            radius). In the default case when the value is None if any
            outlier is detected, a ValueError will be raised. The outlier
            label should be selected from among the unique 'Y' labels. If
            it is specified with a different value a warning will be raised
            and all class probabilities of outliers will be assigned to be 0.
        metric : str, default='euclidean'
            The distance metric to use. For a list of available metrics, see
            the documentation of :class:`~sklearn.metrics.DistanceMetric`.
            Currently does not support `'precomputed'`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.
            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:
            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
            Each thread then iterates on all the chunks of Y. This strategy is
            embarrassingly parallel and comes with no datastructures
            synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
            Each thread processes all the chunks of X in turn. This strategy is
            a sequence of embarrassingly parallel subtasks (the inner loop on Y
            chunks) with intermediate datastructures synchronisation at each
            iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
            'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
            'parallel_on_X' is usually the most efficient strategy.
            When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
            brings more opportunity for parallelism and is therefore more efficient
            despite the synchronization step at each iteration of the outer loop
            on chunks of `X`.
            - None (default) looks-up in scikit-learn configuration for
            `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
        Returns
        -------
        probabilities : ndarray of shape (n_samples_X, n_classes)
            An array containing the class probabilities for each sample.
        """
        if weights not in {"uniform", "distance"}:
            raise ValueError(
                "Only the 'uniform' or 'distance' weights options are supported"
                f" at this time. Got: {weights=}."
            )
        if X.dtype == Y.dtype == np.float64:
            return RadiusNeighborsClassMode64.compute(
                X=X,
                Y=Y,
                radius=radius,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                outlier_label=outlier_label,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )

        if X.dtype == Y.dtype == np.float32:
            return RadiusNeighborsClassMode32.compute(
                X=X,
                Y=Y,
                radius=radius,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                outlier_label=outlier_label,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )

        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
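The dispatchers above are scikit-learn internals; a minimal usage sketch follows, assuming the names are re-exported from the `sklearn.metrics._pairwise_distances_reduction` package (as suggested by the file path of this listing; being private, the import path may change between releases). The call signatures and return shapes are taken from the docstrings above.

# Minimal usage sketch of the dispatcher API (private internals, illustrative only).
import numpy as np
from sklearn.metrics._pairwise_distances_reduction import (
    ArgKmin,
    RadiusNeighbors,
    sqeuclidean_row_norms,
)

rng = np.random.default_rng(0)
X = np.ascontiguousarray(rng.random((100, 8)), dtype=np.float64)
Y = np.ascontiguousarray(rng.random((500, 8)), dtype=np.float64)

# Inputs must be C-contiguous and share the same float dtype, otherwise
# is_usable_for() returns False and callers fall back to other code paths.
assert ArgKmin.is_usable_for(X, Y, metric="euclidean")

# Brute-force 5-nearest-neighbors; distances come first when requested.
distances, indices = ArgKmin.compute(
    X=X, Y=Y, k=5, metric="euclidean", return_distance=True
)
print(indices.shape)  # (100, 5)

# Radius queries return one variable-length index array per query row.
neigh_indices = RadiusNeighbors.compute(X=X, Y=Y, radius=0.5)
print(len(neigh_indices), neigh_indices[0].shape)

# Squared row norms, dispatched on dtype (float64 here).
norms = sqeuclidean_row_norms(X, num_threads=2)
print(norms.shape)  # (100,)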
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (510 kB).
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd
ADDED
@@ -0,0 +1,396 @@
# WARNING: Do not edit this file directly.
# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd.tp'.
# Changes must be made there.

from libcpp.vector cimport vector

from ...utils._typedefs cimport float64_t, float32_t, int32_t, intp_t


cdef void _middle_term_sparse_sparse_64(
    const float64_t[:] X_data,
    const int32_t[:] X_indices,
    const int32_t[:] X_indptr,
    intp_t X_start,
    intp_t X_end,
    const float64_t[:] Y_data,
    const int32_t[:] Y_indices,
    const int32_t[:] Y_indptr,
    intp_t Y_start,
    intp_t Y_end,
    float64_t * D,
) noexcept nogil


cdef class MiddleTermComputer64:
    cdef:
        intp_t effective_n_threads
        intp_t chunks_n_threads
        intp_t dist_middle_terms_chunks_size
        intp_t n_features
        intp_t chunk_size

        # Buffers for the `-2 * X_c @ Y_c.T` term computed via GEMM
        vector[vector[float64_t]] dist_middle_terms_chunks

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil

    cdef void _parallel_on_X_parallel_init(self, intp_t thread_num) noexcept nogil

    cdef void _parallel_on_X_init_chunk(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_init(self) noexcept nogil

    cdef void _parallel_on_Y_parallel_init(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil


cdef class DenseDenseMiddleTermComputer64(MiddleTermComputer64):
    cdef:
        const float64_t[:, ::1] X
        const float64_t[:, ::1] Y

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil

    cdef void _parallel_on_X_init_chunk(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_parallel_init(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil


cdef class SparseSparseMiddleTermComputer64(MiddleTermComputer64):
    cdef:
        const float64_t[:] X_data
        const int32_t[:] X_indices
        const int32_t[:] X_indptr

        const float64_t[:] Y_data
        const int32_t[:] Y_indices
        const int32_t[:] Y_indptr

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil


cdef class SparseDenseMiddleTermComputer64(MiddleTermComputer64):
    cdef:
        const float64_t[:] X_data
        const int32_t[:] X_indices
        const int32_t[:] X_indptr

        const float64_t[:, ::1] Y

        # We treat the dense-sparse case with the sparse-dense case by simply
        # treating the dist_middle_terms as F-ordered and by swapping arguments.
        # This attribute is meant to encode the case and adapt the logic
        # accordingly.
        bint c_ordered_middle_term

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil


cdef class MiddleTermComputer32:
    cdef:
        intp_t effective_n_threads
        intp_t chunks_n_threads
        intp_t dist_middle_terms_chunks_size
        intp_t n_features
        intp_t chunk_size

        # Buffers for the `-2 * X_c @ Y_c.T` term computed via GEMM
        vector[vector[float64_t]] dist_middle_terms_chunks

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil

    cdef void _parallel_on_X_parallel_init(self, intp_t thread_num) noexcept nogil

    cdef void _parallel_on_X_init_chunk(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_init(self) noexcept nogil

    cdef void _parallel_on_Y_parallel_init(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil


cdef class DenseDenseMiddleTermComputer32(MiddleTermComputer32):
    cdef:
        const float32_t[:, ::1] X
        const float32_t[:, ::1] Y

        # Buffers for upcasting chunks of X and Y from 32bit to 64bit
        vector[vector[float64_t]] X_c_upcast
        vector[vector[float64_t]] Y_c_upcast

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil

    cdef void _parallel_on_X_init_chunk(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_parallel_init(
        self,
        intp_t thread_num,
        intp_t X_start,
        intp_t X_end,
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil


cdef class SparseSparseMiddleTermComputer32(MiddleTermComputer32):
    cdef:
        const float64_t[:] X_data
        const int32_t[:] X_indices
        const int32_t[:] X_indptr

        const float64_t[:] Y_data
        const int32_t[:] Y_indices
        const int32_t[:] Y_indptr

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil


cdef class SparseDenseMiddleTermComputer32(MiddleTermComputer32):
    cdef:
        const float64_t[:] X_data
        const int32_t[:] X_indices
        const int32_t[:] X_indptr

        const float32_t[:, ::1] Y

        # We treat the dense-sparse case with the sparse-dense case by simply
        # treating the dist_middle_terms as F-ordered and by swapping arguments.
        # This attribute is meant to encode the case and adapt the logic
        # accordingly.
        bint c_ordered_middle_term

    cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num
    ) noexcept nogil

    cdef float64_t * _compute_dist_middle_terms(
        self,
        intp_t X_start,
        intp_t X_end,
        intp_t Y_start,
        intp_t Y_end,
        intp_t thread_num,
    ) noexcept nogil
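The buffers declared above hold the "middle term" -2 * X_c @ Y_c.T of the squared Euclidean distance, which is the only part that needs a matrix-matrix product per chunk pair. A plain NumPy sketch of that decomposition (an illustration, not the Cython implementation above) follows.

# NumPy sketch of the middle-term decomposition:
#   ||x - y||^2 = ||x||^2 - 2 * x.y + ||y||^2
# where -2 * X_c @ Y_c.T is a single GEMM over a chunk pair.
import numpy as np

rng = np.random.default_rng(0)
X_c = rng.random((64, 16))   # a chunk of X rows
Y_c = rng.random((128, 16))  # a chunk of Y rows

# Middle term: one matrix-matrix product per (X chunk, Y chunk) pair.
middle_term = -2.0 * X_c @ Y_c.T

# Adding the per-row squared norms recovers squared Euclidean distances.
sq_dists = (
    (X_c ** 2).sum(axis=1)[:, None] + middle_term + (Y_c ** 2).sum(axis=1)[None, :]
)

# Check against a direct computation.
direct = ((X_c[:, None, :] - Y_c[None, :, :]) ** 2).sum(axis=-1)
assert np.allclose(sq_dists, direct)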
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (393 kB).
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd
ADDED
@@ -0,0 +1,150 @@
# WARNING: Do not edit this file directly.
# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd.tp'.
# Changes must be made there.

cimport numpy as cnp

from libcpp.memory cimport shared_ptr
from libcpp.vector cimport vector
from cython cimport final

from ...utils._typedefs cimport intp_t, float64_t

cnp.import_array()

######################
## std::vector to np.ndarray coercion
# As type covariance is not supported for C++ containers via Cython,
# we need to redefine fused types.
ctypedef fused vector_double_intp_t:
    vector[intp_t]
    vector[float64_t]


ctypedef fused vector_vector_double_intp_t:
    vector[vector[intp_t]]
    vector[vector[float64_t]]

cdef cnp.ndarray[object, ndim=1] coerce_vectors_to_nd_arrays(
    shared_ptr[vector_vector_double_intp_t] vecs
)

#####################

from ._base cimport BaseDistancesReduction64
from ._middle_term_computer cimport MiddleTermComputer64

cdef class RadiusNeighbors64(BaseDistancesReduction64):
    """float64 implementation of the RadiusNeighbors."""

    cdef:
        float64_t radius

        # DistanceMetric64 compute rank-preserving surrogate distance via rdist
        # which are proxies necessitating less computations.
        # We get the equivalent for the radius to be able to compare it against
        # vectors' rank-preserving surrogate distances.
        float64_t r_radius

        # Neighbors indices and distances are returned as np.ndarrays of np.ndarrays.
        #
        # For this implementation, we want resizable buffers which we will wrap
        # into numpy arrays at the end. std::vector comes as a handy container
        # for interacting efficiently with resizable buffers.
        #
        # Though it is possible to access their buffer address with
        # std::vector::data, they can't be stolen: buffers lifetime
        # is tied to their std::vector and are deallocated when
        # std::vectors are.
        #
        # To solve this, we dynamically allocate std::vectors and then
        # encapsulate them in a StdVectorSentinel responsible for
        # freeing them when the associated np.ndarray is freed.
        #
        # Shared pointers (defined via shared_ptr) are use for safer memory management.
        # Unique pointers (defined via unique_ptr) can't be used as datastructures
        # are shared across threads for parallel_on_X; see _parallel_on_X_init_chunk.
        shared_ptr[vector[vector[intp_t]]] neigh_indices
        shared_ptr[vector[vector[float64_t]]] neigh_distances

        # Used as array of pointers to private datastructures used in threads.
        vector[shared_ptr[vector[vector[intp_t]]]] neigh_indices_chunks
        vector[shared_ptr[vector[vector[float64_t]]]] neigh_distances_chunks

        bint sort_results

    @final
    cdef void _merge_vectors(
        self,
        intp_t idx,
        intp_t num_threads,
    ) noexcept nogil


cdef class EuclideanRadiusNeighbors64(RadiusNeighbors64):
    """EuclideanDistance-specialisation of RadiusNeighbors64."""
    cdef:
        MiddleTermComputer64 middle_term_computer
        const float64_t[::1] X_norm_squared
        const float64_t[::1] Y_norm_squared

        bint use_squared_distances

from ._base cimport BaseDistancesReduction32
from ._middle_term_computer cimport MiddleTermComputer32

cdef class RadiusNeighbors32(BaseDistancesReduction32):
    """float32 implementation of the RadiusNeighbors."""

    cdef:
        float64_t radius

        # DistanceMetric32 compute rank-preserving surrogate distance via rdist
        # which are proxies necessitating less computations.
        # We get the equivalent for the radius to be able to compare it against
        # vectors' rank-preserving surrogate distances.
        float64_t r_radius

        # Neighbors indices and distances are returned as np.ndarrays of np.ndarrays.
        #
        # For this implementation, we want resizable buffers which we will wrap
        # into numpy arrays at the end. std::vector comes as a handy container
        # for interacting efficiently with resizable buffers.
        #
        # Though it is possible to access their buffer address with
        # std::vector::data, they can't be stolen: buffers lifetime
        # is tied to their std::vector and are deallocated when
        # std::vectors are.
        #
        # To solve this, we dynamically allocate std::vectors and then
        # encapsulate them in a StdVectorSentinel responsible for
        # freeing them when the associated np.ndarray is freed.
        #
        # Shared pointers (defined via shared_ptr) are use for safer memory management.
        # Unique pointers (defined via unique_ptr) can't be used as datastructures
        # are shared across threads for parallel_on_X; see _parallel_on_X_init_chunk.
        shared_ptr[vector[vector[intp_t]]] neigh_indices
        shared_ptr[vector[vector[float64_t]]] neigh_distances

        # Used as array of pointers to private datastructures used in threads.
        vector[shared_ptr[vector[vector[intp_t]]]] neigh_indices_chunks
        vector[shared_ptr[vector[vector[float64_t]]]] neigh_distances_chunks

        bint sort_results

    @final
    cdef void _merge_vectors(
        self,
        intp_t idx,
        intp_t num_threads,
    ) noexcept nogil


cdef class EuclideanRadiusNeighbors32(RadiusNeighbors32):
    """EuclideanDistance-specialisation of RadiusNeighbors32."""
    cdef:
        MiddleTermComputer32 middle_term_computer
        const float64_t[::1] X_norm_squared
        const float64_t[::1] Y_norm_squared

        bint use_squared_distances
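The `r_radius` attribute above stores the query radius converted into the metric's rank-preserving surrogate distance (rdist). A minimal sketch of the idea for the Euclidean case, where the surrogate is the squared distance, follows (plain NumPy illustration, not the Cython class above); the point is that membership in the radius can be tested without taking any square roots.

# Sketch of the rank-preserving surrogate distance used for radius tests.
import numpy as np

rng = np.random.default_rng(0)
x = rng.random(8)
Y = rng.random((1000, 8))
radius = 0.9

sq_dists = ((Y - x) ** 2).sum(axis=1)  # surrogate (squared) distances
r_radius = radius ** 2                 # radius expressed in surrogate units

in_ball_surrogate = np.flatnonzero(sq_dists <= r_radius)
in_ball_exact = np.flatnonzero(np.sqrt(sq_dists) <= radius)

# Both selections agree because squaring is monotonic on non-negative values.
assert np.array_equal(in_ball_surrogate, in_ball_exact)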
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors_classmode.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (307 kB).
venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_fast.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (307 kB).
venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes). View file
|
|
venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/confusion_matrix.cpython-310.pyc
ADDED
Binary file (14.7 kB). View file
|
|