Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/25.input_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/__init__.py +162 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py +223 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py +401 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_lfw.py +570 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py +306 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py +2284 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py +273 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_fast.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_io.py +584 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py +983 -0
- venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py +367 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_regression.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_bicluster.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_unsupervised.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_supervised.py +1298 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_common.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_supervised.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_unsupervised.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_bicluster.py +56 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_common.py +219 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_supervised.py +482 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_unsupervised.py +413 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_classification.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_common.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_dist_metrics.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a62e2b38d4b05a49cb79f73688c63fcd392d525e9680342e58032b19c6a08cf0
+size 33555533
ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7eb69a0f83463d456b09d9f04406518c181fcd4dab9472d093e2bab259b7e895
+size 9372
ckpts/universal/global_step40/zero/25.input_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:134cf9a96301b45e9a70aad00df044ad3af56a473c6b15b634f1f381fb382427
+size 9293
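
The three checkpoint tensors above are stored as Git LFS pointer files rather than raw binaries: each pointer is three lines (spec version, sha256 object id, byte size), and the actual payload lives in LFS storage. A minimal sketch of reading such a pointer, assuming the repository is checked out without LFS smudging so the pointer text itself is on disk:

# Minimal sketch: parse a git-lfs pointer file into its three fields.
# Assumes the file at the given path is a pointer, not the smudged binary.
def read_lfs_pointer(path):
    fields = {}
    with open(path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields["version"], fields["oid"], int(fields["size"])

version, oid, size = read_lfs_pointer(
    "ckpts/universal/global_step40/zero/25.input_layernorm.weight/fp32.pt"
)
print(oid, size)  # sha256:134cf9a9... 9293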
venv/lib/python3.10/site-packages/sklearn/datasets/__init__.py
ADDED
@@ -0,0 +1,162 @@
+"""
+The :mod:`sklearn.datasets` module includes utilities to load datasets,
+including methods to load and fetch popular reference datasets. It also
+features some artificial data generators.
+"""
+import textwrap
+
+from ._base import (
+    clear_data_home,
+    get_data_home,
+    load_breast_cancer,
+    load_diabetes,
+    load_digits,
+    load_files,
+    load_iris,
+    load_linnerud,
+    load_sample_image,
+    load_sample_images,
+    load_wine,
+)
+from ._california_housing import fetch_california_housing
+from ._covtype import fetch_covtype
+from ._kddcup99 import fetch_kddcup99
+from ._lfw import fetch_lfw_pairs, fetch_lfw_people
+from ._olivetti_faces import fetch_olivetti_faces
+from ._openml import fetch_openml
+from ._rcv1 import fetch_rcv1
+from ._samples_generator import (
+    make_biclusters,
+    make_blobs,
+    make_checkerboard,
+    make_circles,
+    make_classification,
+    make_friedman1,
+    make_friedman2,
+    make_friedman3,
+    make_gaussian_quantiles,
+    make_hastie_10_2,
+    make_low_rank_matrix,
+    make_moons,
+    make_multilabel_classification,
+    make_regression,
+    make_s_curve,
+    make_sparse_coded_signal,
+    make_sparse_spd_matrix,
+    make_sparse_uncorrelated,
+    make_spd_matrix,
+    make_swiss_roll,
+)
+from ._species_distributions import fetch_species_distributions
+from ._svmlight_format_io import (
+    dump_svmlight_file,
+    load_svmlight_file,
+    load_svmlight_files,
+)
+from ._twenty_newsgroups import fetch_20newsgroups, fetch_20newsgroups_vectorized
+
+__all__ = [
+    "clear_data_home",
+    "dump_svmlight_file",
+    "fetch_20newsgroups",
+    "fetch_20newsgroups_vectorized",
+    "fetch_lfw_pairs",
+    "fetch_lfw_people",
+    "fetch_olivetti_faces",
+    "fetch_species_distributions",
+    "fetch_california_housing",
+    "fetch_covtype",
+    "fetch_rcv1",
+    "fetch_kddcup99",
+    "fetch_openml",
+    "get_data_home",
+    "load_diabetes",
+    "load_digits",
+    "load_files",
+    "load_iris",
+    "load_breast_cancer",
+    "load_linnerud",
+    "load_sample_image",
+    "load_sample_images",
+    "load_svmlight_file",
+    "load_svmlight_files",
+    "load_wine",
+    "make_biclusters",
+    "make_blobs",
+    "make_circles",
+    "make_classification",
+    "make_checkerboard",
+    "make_friedman1",
+    "make_friedman2",
+    "make_friedman3",
+    "make_gaussian_quantiles",
+    "make_hastie_10_2",
+    "make_low_rank_matrix",
+    "make_moons",
+    "make_multilabel_classification",
+    "make_regression",
+    "make_s_curve",
+    "make_sparse_coded_signal",
+    "make_sparse_spd_matrix",
+    "make_sparse_uncorrelated",
+    "make_spd_matrix",
+    "make_swiss_roll",
+]
+
+
+def __getattr__(name):
+    if name == "load_boston":
+        msg = textwrap.dedent("""
+            `load_boston` has been removed from scikit-learn since version 1.2.
+
+            The Boston housing prices dataset has an ethical problem: as
+            investigated in [1], the authors of this dataset engineered a
+            non-invertible variable "B" assuming that racial self-segregation had a
+            positive impact on house prices [2]. Furthermore the goal of the
+            research that led to the creation of this dataset was to study the
+            impact of air quality but it did not give adequate demonstration of the
+            validity of this assumption.
+
+            The scikit-learn maintainers therefore strongly discourage the use of
+            this dataset unless the purpose of the code is to study and educate
+            about ethical issues in data science and machine learning.
+
+            In this special case, you can fetch the dataset from the original
+            source::
+
+                import pandas as pd
+                import numpy as np
+
+                data_url = "http://lib.stat.cmu.edu/datasets/boston"
+                raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
+                data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
+                target = raw_df.values[1::2, 2]
+
+            Alternative datasets include the California housing dataset and the
+            Ames housing dataset. You can load the datasets as follows::
+
+                from sklearn.datasets import fetch_california_housing
+                housing = fetch_california_housing()
+
+            for the California housing dataset and::
+
+                from sklearn.datasets import fetch_openml
+                housing = fetch_openml(name="house_prices", as_frame=True)
+
+            for the Ames housing dataset.
+
+            [1] M Carlisle.
+            "Racist data destruction?"
+            <https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8>
+
+            [2] Harrison Jr, David, and Daniel L. Rubinfeld.
+            "Hedonic housing prices and the demand for clean air."
+            Journal of environmental economics and management 5.1 (1978): 81-102.
+            <https://www.researchgate.net/publication/4974606_Hedonic_housing_prices_and_the_demand_for_clean_air>
+            """)
+        raise ImportError(msg)
+    try:
+        return globals()[name]
+    except KeyError:
+        # This is turned into the appropriate ImportError
+        raise AttributeError
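
The module-level `__getattr__` hook above is what turns `from sklearn.datasets import load_boston` into the long explanatory `ImportError` rather than a plain `AttributeError`. A minimal sketch of that behavior, assuming scikit-learn >= 1.2 is installed:

# Minimal sketch: the removed load_boston raises ImportError carrying the
# message reproduced above, while the suggested replacement still works.
try:
    from sklearn.datasets import load_boston  # removed in scikit-learn 1.2
except ImportError as exc:
    assert "ethical problem" in str(exc)  # the explanation shown above

from sklearn.datasets import fetch_california_housing  # suggested replacement
housing = fetch_california_housing()  # first call downloads ~1 MB
print(housing.data.shape)  # (20640, 8)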
venv/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py
ADDED
@@ -0,0 +1,223 @@
+"""California housing dataset.
+
+The original database is available from StatLib
+
+    http://lib.stat.cmu.edu/datasets/
+
+The data contains 20,640 observations on 9 variables.
+
+This dataset contains the average house value as target variable
+and the following input variables (features): average income,
+housing average age, average rooms, average bedrooms, population,
+average occupation, latitude, and longitude in that order.
+
+References
+----------
+
+Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
+Statistics and Probability Letters, 33 (1997) 291-297.
+
+"""
+# Authors: Peter Prettenhofer
+# License: BSD 3 clause
+
+import logging
+import tarfile
+from os import PathLike, makedirs, remove
+from os.path import exists
+
+import joblib
+import numpy as np
+
+from ..utils import Bunch
+from ..utils._param_validation import validate_params
+from . import get_data_home
+from ._base import (
+    RemoteFileMetadata,
+    _convert_data_dataframe,
+    _fetch_remote,
+    _pkl_filepath,
+    load_descr,
+)
+
+# The original data can be found at:
+# https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz
+ARCHIVE = RemoteFileMetadata(
+    filename="cal_housing.tgz",
+    url="https://ndownloader.figshare.com/files/5976036",
+    checksum="aaa5c9a6afe2225cc2aed2723682ae403280c4a3695a2ddda4ffb5d8215ea681",
+)
+
+logger = logging.getLogger(__name__)
+
+
+@validate_params(
+    {
+        "data_home": [str, PathLike, None],
+        "download_if_missing": ["boolean"],
+        "return_X_y": ["boolean"],
+        "as_frame": ["boolean"],
+    },
+    prefer_skip_nested_validation=True,
+)
+def fetch_california_housing(
+    *, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False
+):
+    """Load the California housing dataset (regression).
+
+    ==============   ==============
+    Samples total             20640
+    Dimensionality                8
+    Features                   real
+    Target           real 0.15 - 5.
+    ==============   ==============
+
+    Read more in the :ref:`User Guide <california_housing_dataset>`.
+
+    Parameters
+    ----------
+    data_home : str or path-like, default=None
+        Specify another download and cache folder for the datasets. By default
+        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
+
+    download_if_missing : bool, default=True
+        If False, raise an OSError if the data is not locally available
+        instead of trying to download the data from the source site.
+
+    return_X_y : bool, default=False
+        If True, returns ``(data.data, data.target)`` instead of a Bunch
+        object.
+
+        .. versionadded:: 0.20
+
+    as_frame : bool, default=False
+        If True, the data is a pandas DataFrame including columns with
+        appropriate dtypes (numeric, string or categorical). The target is
+        a pandas DataFrame or Series depending on the number of target_columns.
+
+        .. versionadded:: 0.23
+
+    Returns
+    -------
+    dataset : :class:`~sklearn.utils.Bunch`
+        Dictionary-like object, with the following attributes.
+
+        data : ndarray, shape (20640, 8)
+            Each row corresponding to the 8 feature values in order.
+            If ``as_frame`` is True, ``data`` is a pandas object.
+        target : numpy array of shape (20640,)
+            Each value corresponds to the average
+            house value in units of 100,000.
+            If ``as_frame`` is True, ``target`` is a pandas object.
+        feature_names : list of length 8
+            Array of ordered feature names used in the dataset.
+        DESCR : str
+            Description of the California housing dataset.
+        frame : pandas DataFrame
+            Only present when `as_frame=True`. DataFrame with ``data`` and
+            ``target``.
+
+            .. versionadded:: 0.23
+
+    (data, target) : tuple if ``return_X_y`` is True
+        A tuple of two ndarray. The first containing a 2D array of
+        shape (n_samples, n_features) with each row representing one
+        sample and each column representing the features. The second
+        ndarray of shape (n_samples,) containing the target samples.
+
+        .. versionadded:: 0.20
+
+    Notes
+    -----
+
+    This dataset consists of 20,640 samples and 9 features.
+
+    Examples
+    --------
+    >>> from sklearn.datasets import fetch_california_housing
+    >>> housing = fetch_california_housing()
+    >>> print(housing.data.shape, housing.target.shape)
+    (20640, 8) (20640,)
+    >>> print(housing.feature_names[0:6])
+    ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup']
+    """
+    data_home = get_data_home(data_home=data_home)
+    if not exists(data_home):
+        makedirs(data_home)
+
+    filepath = _pkl_filepath(data_home, "cal_housing.pkz")
+    if not exists(filepath):
+        if not download_if_missing:
+            raise OSError("Data not found and `download_if_missing` is False")
+
+        logger.info(
+            "Downloading Cal. housing from {} to {}".format(ARCHIVE.url, data_home)
+        )
+
+        archive_path = _fetch_remote(ARCHIVE, dirname=data_home)
+
+        with tarfile.open(mode="r:gz", name=archive_path) as f:
+            cal_housing = np.loadtxt(
+                f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter=","
+            )
+            # Columns are not in the same order compared to the previous
+            # URL resource on lib.stat.cmu.edu
+            columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]
+            cal_housing = cal_housing[:, columns_index]
+
+            joblib.dump(cal_housing, filepath, compress=6)
+        remove(archive_path)
+
+    else:
+        cal_housing = joblib.load(filepath)
+
+    feature_names = [
+        "MedInc",
+        "HouseAge",
+        "AveRooms",
+        "AveBedrms",
+        "Population",
+        "AveOccup",
+        "Latitude",
+        "Longitude",
+    ]
+
+    target, data = cal_housing[:, 0], cal_housing[:, 1:]
+
+    # avg rooms = total rooms / households
+    data[:, 2] /= data[:, 5]
+
+    # avg bed rooms = total bed rooms / households
+    data[:, 3] /= data[:, 5]
+
+    # avg occupancy = population / households
+    data[:, 5] = data[:, 4] / data[:, 5]
+
+    # target in units of 100,000
+    target = target / 100000.0
+
+    descr = load_descr("california_housing.rst")
+
+    X = data
+    y = target
+
+    frame = None
+    target_names = [
+        "MedHouseVal",
+    ]
+    if as_frame:
+        frame, X, y = _convert_data_dataframe(
+            "fetch_california_housing", data, target, feature_names, target_names
+        )
+
+    if return_X_y:
+        return X, y
+
+    return Bunch(
+        data=X,
+        target=y,
+        frame=frame,
+        target_names=target_names,
+        feature_names=feature_names,
+        DESCR=descr,
+    )
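
Since the loader derives the `Ave*` columns by dividing per-block totals by household counts and rescales the target to units of $100,000, the returned matrix is ready for a quick regression baseline. A minimal sketch, not part of the file above (the cross-validation settings are illustrative):

from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

# return_X_y=True skips the Bunch wrapper; the archive is cached under
# ~/scikit_learn_data after the first download.
X, y = fetch_california_housing(return_X_y=True)
scores = cross_val_score(Ridge(alpha=1.0), X, y, cv=5, scoring="r2")
print(round(scores.mean(), 2))  # mean R^2 of a plain linear baseline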
venv/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py
ADDED
@@ -0,0 +1,401 @@
+"""KDDCUP 99 dataset.
+
+A classic dataset for anomaly detection.
+
+The dataset page is available from UCI Machine Learning Repository
+
+https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
+
+"""
+
+import errno
+import logging
+import os
+from gzip import GzipFile
+from os.path import exists, join
+
+import joblib
+import numpy as np
+
+from ..utils import Bunch, check_random_state
+from ..utils import shuffle as shuffle_method
+from ..utils._param_validation import StrOptions, validate_params
+from . import get_data_home
+from ._base import (
+    RemoteFileMetadata,
+    _convert_data_dataframe,
+    _fetch_remote,
+    load_descr,
+)
+
+# The original data can be found at:
+# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
+ARCHIVE = RemoteFileMetadata(
+    filename="kddcup99_data",
+    url="https://ndownloader.figshare.com/files/5976045",
+    checksum="3b6c942aa0356c0ca35b7b595a26c89d343652c9db428893e7494f837b274292",
+)
+
+# The original data can be found at:
+# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz
+ARCHIVE_10_PERCENT = RemoteFileMetadata(
+    filename="kddcup99_10_data",
+    url="https://ndownloader.figshare.com/files/5976042",
+    checksum="8045aca0d84e70e622d1148d7df782496f6333bf6eb979a1b0837c42a9fd9561",
+)
+
+logger = logging.getLogger(__name__)
+
+
+@validate_params(
+    {
+        "subset": [StrOptions({"SA", "SF", "http", "smtp"}), None],
+        "data_home": [str, os.PathLike, None],
+        "shuffle": ["boolean"],
+        "random_state": ["random_state"],
+        "percent10": ["boolean"],
+        "download_if_missing": ["boolean"],
+        "return_X_y": ["boolean"],
+        "as_frame": ["boolean"],
+    },
+    prefer_skip_nested_validation=True,
+)
+def fetch_kddcup99(
+    *,
+    subset=None,
+    data_home=None,
+    shuffle=False,
+    random_state=None,
+    percent10=True,
+    download_if_missing=True,
+    return_X_y=False,
+    as_frame=False,
+):
+    """Load the kddcup99 dataset (classification).
+
+    Download it if necessary.
+
+    =================   ====================================
+    Classes                                               23
+    Samples total                                    4898431
+    Dimensionality                                        41
+    Features            discrete (int) or continuous (float)
+    =================   ====================================
+
+    Read more in the :ref:`User Guide <kddcup99_dataset>`.
+
+    .. versionadded:: 0.18
+
+    Parameters
+    ----------
+    subset : {'SA', 'SF', 'http', 'smtp'}, default=None
+        To return the corresponding classical subsets of kddcup 99.
+        If None, return the entire kddcup 99 dataset.
+
+    data_home : str or path-like, default=None
+        Specify another download and cache folder for the datasets. By default
+        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
+
+        .. versionadded:: 0.19
+
+    shuffle : bool, default=False
+        Whether to shuffle dataset.
+
+    random_state : int, RandomState instance or None, default=None
+        Determines random number generation for dataset shuffling and for
+        selection of abnormal samples if `subset='SA'`. Pass an int for
+        reproducible output across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    percent10 : bool, default=True
+        Whether to load only 10 percent of the data.
+
+    download_if_missing : bool, default=True
+        If False, raise an OSError if the data is not locally available
+        instead of trying to download the data from the source site.
+
+    return_X_y : bool, default=False
+        If True, returns ``(data, target)`` instead of a Bunch object. See
+        below for more information about the `data` and `target` object.
+
+        .. versionadded:: 0.20
+
+    as_frame : bool, default=False
+        If `True`, returns a pandas Dataframe for the ``data`` and ``target``
+        objects in the `Bunch` returned object; `Bunch` return object will also
+        have a ``frame`` member.
+
+        .. versionadded:: 0.24
+
+    Returns
+    -------
+    data : :class:`~sklearn.utils.Bunch`
+        Dictionary-like object, with the following attributes.
+
+        data : {ndarray, dataframe} of shape (494021, 41)
+            The data matrix to learn. If `as_frame=True`, `data` will be a
+            pandas DataFrame.
+        target : {ndarray, series} of shape (494021,)
+            The regression target for each sample. If `as_frame=True`, `target`
+            will be a pandas Series.
+        frame : dataframe of shape (494021, 42)
+            Only present when `as_frame=True`. Contains `data` and `target`.
+        DESCR : str
+            The full description of the dataset.
+        feature_names : list
+            The names of the dataset columns
+        target_names: list
+            The names of the target columns
+
+    (data, target) : tuple if ``return_X_y`` is True
+        A tuple of two ndarray. The first containing a 2D array of
+        shape (n_samples, n_features) with each row representing one
+        sample and each column representing the features. The second
+        ndarray of shape (n_samples,) containing the target samples.
+
+        .. versionadded:: 0.20
+    """
+    data_home = get_data_home(data_home=data_home)
+    kddcup99 = _fetch_brute_kddcup99(
+        data_home=data_home,
+        percent10=percent10,
+        download_if_missing=download_if_missing,
+    )
+
+    data = kddcup99.data
+    target = kddcup99.target
+    feature_names = kddcup99.feature_names
+    target_names = kddcup99.target_names
+
+    if subset == "SA":
+        s = target == b"normal."
+        t = np.logical_not(s)
+        normal_samples = data[s, :]
+        normal_targets = target[s]
+        abnormal_samples = data[t, :]
+        abnormal_targets = target[t]
+
+        n_samples_abnormal = abnormal_samples.shape[0]
+        # selected abnormal samples:
+        random_state = check_random_state(random_state)
+        r = random_state.randint(0, n_samples_abnormal, 3377)
+        abnormal_samples = abnormal_samples[r]
+        abnormal_targets = abnormal_targets[r]
+
+        data = np.r_[normal_samples, abnormal_samples]
+        target = np.r_[normal_targets, abnormal_targets]
+
+    if subset == "SF" or subset == "http" or subset == "smtp":
+        # select all samples with positive logged_in attribute:
+        s = data[:, 11] == 1
+        data = np.c_[data[s, :11], data[s, 12:]]
+        feature_names = feature_names[:11] + feature_names[12:]
+        target = target[s]
+
+        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
+        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
+        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))
+
+        if subset == "http":
+            s = data[:, 2] == b"http"
+            data = data[s]
+            target = target[s]
+            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
+            feature_names = [feature_names[0], feature_names[4], feature_names[5]]
+
+        if subset == "smtp":
+            s = data[:, 2] == b"smtp"
+            data = data[s]
+            target = target[s]
+            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
+            feature_names = [feature_names[0], feature_names[4], feature_names[5]]
+
+        if subset == "SF":
+            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
+            feature_names = [
+                feature_names[0],
+                feature_names[2],
+                feature_names[4],
+                feature_names[5],
+            ]
+
+    if shuffle:
+        data, target = shuffle_method(data, target, random_state=random_state)
+
+    fdescr = load_descr("kddcup99.rst")
+
+    frame = None
+    if as_frame:
+        frame, data, target = _convert_data_dataframe(
+            "fetch_kddcup99", data, target, feature_names, target_names
+        )
+
+    if return_X_y:
+        return data, target
+
+    return Bunch(
+        data=data,
+        target=target,
+        frame=frame,
+        target_names=target_names,
+        feature_names=feature_names,
+        DESCR=fdescr,
+    )
+
+
+def _fetch_brute_kddcup99(data_home=None, download_if_missing=True, percent10=True):
+    """Load the kddcup99 dataset, downloading it if necessary.
+
+    Parameters
+    ----------
+    data_home : str, default=None
+        Specify another download and cache folder for the datasets. By default
+        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
+
+    download_if_missing : bool, default=True
+        If False, raise an OSError if the data is not locally available
+        instead of trying to download the data from the source site.
+
+    percent10 : bool, default=True
+        Whether to load only 10 percent of the data.
+
+    Returns
+    -------
+    dataset : :class:`~sklearn.utils.Bunch`
+        Dictionary-like object, with the following attributes.
+
+        data : ndarray of shape (494021, 41)
+            Each row corresponds to the 41 features in the dataset.
+        target : ndarray of shape (494021,)
+            Each value corresponds to one of the 21 attack types or to the
+            label 'normal.'.
+        feature_names : list
+            The names of the dataset columns
+        target_names: list
+            The names of the target columns
+        DESCR : str
+            Description of the kddcup99 dataset.
+
+    """
+
+    data_home = get_data_home(data_home=data_home)
+    dir_suffix = "-py3"
+
+    if percent10:
+        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
+        archive = ARCHIVE_10_PERCENT
+    else:
+        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
+        archive = ARCHIVE
+
+    samples_path = join(kddcup_dir, "samples")
+    targets_path = join(kddcup_dir, "targets")
+    available = exists(samples_path)
+
+    dt = [
+        ("duration", int),
+        ("protocol_type", "S4"),
+        ("service", "S11"),
+        ("flag", "S6"),
+        ("src_bytes", int),
+        ("dst_bytes", int),
+        ("land", int),
+        ("wrong_fragment", int),
+        ("urgent", int),
+        ("hot", int),
+        ("num_failed_logins", int),
+        ("logged_in", int),
+        ("num_compromised", int),
+        ("root_shell", int),
+        ("su_attempted", int),
+        ("num_root", int),
+        ("num_file_creations", int),
+        ("num_shells", int),
+        ("num_access_files", int),
+        ("num_outbound_cmds", int),
+        ("is_host_login", int),
+        ("is_guest_login", int),
+        ("count", int),
+        ("srv_count", int),
+        ("serror_rate", float),
+        ("srv_serror_rate", float),
+        ("rerror_rate", float),
+        ("srv_rerror_rate", float),
+        ("same_srv_rate", float),
+        ("diff_srv_rate", float),
+        ("srv_diff_host_rate", float),
+        ("dst_host_count", int),
+        ("dst_host_srv_count", int),
+        ("dst_host_same_srv_rate", float),
+        ("dst_host_diff_srv_rate", float),
+        ("dst_host_same_src_port_rate", float),
+        ("dst_host_srv_diff_host_rate", float),
+        ("dst_host_serror_rate", float),
+        ("dst_host_srv_serror_rate", float),
+        ("dst_host_rerror_rate", float),
+        ("dst_host_srv_rerror_rate", float),
+        ("labels", "S16"),
+    ]
+
+    column_names = [c[0] for c in dt]
+    target_names = column_names[-1]
+    feature_names = column_names[:-1]
+
+    if available:
+        try:
+            X = joblib.load(samples_path)
+            y = joblib.load(targets_path)
+        except Exception as e:
+            raise OSError(
+                "The cache for fetch_kddcup99 is invalid, please delete "
+                f"{str(kddcup_dir)} and run the fetch_kddcup99 again"
+            ) from e
+
+    elif download_if_missing:
+        _mkdirp(kddcup_dir)
+        logger.info("Downloading %s" % archive.url)
+        _fetch_remote(archive, dirname=kddcup_dir)
+        DT = np.dtype(dt)
+        logger.debug("extracting archive")
+        archive_path = join(kddcup_dir, archive.filename)
+        file_ = GzipFile(filename=archive_path, mode="r")
+        Xy = []
+        for line in file_.readlines():
+            line = line.decode()
+            Xy.append(line.replace("\n", "").split(","))
+        file_.close()
+        logger.debug("extraction done")
+        os.remove(archive_path)
+
+        Xy = np.asarray(Xy, dtype=object)
+        for j in range(42):
+            Xy[:, j] = Xy[:, j].astype(DT[j])
+
+        X = Xy[:, :-1]
+        y = Xy[:, -1]
+        # XXX bug when compress!=0:
+        # (error: 'Incorrect data length while decompressing[...] the file
+        # could be corrupted.')
+
+        joblib.dump(X, samples_path, compress=0)
+        joblib.dump(y, targets_path, compress=0)
+    else:
+        raise OSError("Data not found and `download_if_missing` is False")
+
+    return Bunch(
+        data=X,
+        target=y,
+        feature_names=feature_names,
+        target_names=[target_names],
+    )
+
+
+def _mkdirp(d):
+    """Ensure directory d exists (like mkdir -p on Unix)
+    No guarantee that the directory is writable.
+    """
+    try:
+        os.makedirs(d)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
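
Note how the 'http' and 'smtp' subsets above reduce the data to three log-scaled numeric columns (duration, src_bytes, dst_bytes), which makes them directly usable by anomaly detectors without any categorical encoding. A minimal sketch, not from the file above; the IsolationForest is one illustrative detector choice, and labels come back as byte strings such as b'normal.':

import numpy as np
from sklearn.datasets import fetch_kddcup99
from sklearn.ensemble import IsolationForest

# First call downloads and caches the 10% archive under ~/scikit_learn_data.
X, y = fetch_kddcup99(subset="smtp", return_X_y=True, random_state=0)
X = X.astype(np.float64)                  # smtp columns are all numeric
y_true = (y == b"normal.").astype(int)    # 1 = normal traffic

clf = IsolationForest(random_state=0).fit(X)
pred = (clf.predict(X) == 1).astype(int)  # IsolationForest: 1 = inlier
print("agreement with labels:", round((pred == y_true).mean(), 3))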
venv/lib/python3.10/site-packages/sklearn/datasets/_lfw.py
ADDED
@@ -0,0 +1,570 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Labeled Faces in the Wild (LFW) dataset
|
2 |
+
|
3 |
+
This dataset is a collection of JPEG pictures of famous people collected
|
4 |
+
over the internet, all details are available on the official website:
|
5 |
+
|
6 |
+
http://vis-www.cs.umass.edu/lfw/
|
7 |
+
"""
|
8 |
+
# Copyright (c) 2011 Olivier Grisel <[email protected]>
|
9 |
+
# License: BSD 3 clause
|
10 |
+
|
11 |
+
import logging
|
12 |
+
from numbers import Integral, Real
|
13 |
+
from os import PathLike, listdir, makedirs, remove
|
14 |
+
from os.path import exists, isdir, join
|
15 |
+
|
16 |
+
import numpy as np
|
17 |
+
from joblib import Memory
|
18 |
+
|
19 |
+
from ..utils import Bunch
|
20 |
+
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
|
21 |
+
from ._base import (
|
22 |
+
RemoteFileMetadata,
|
23 |
+
_fetch_remote,
|
24 |
+
get_data_home,
|
25 |
+
load_descr,
|
26 |
+
)
|
27 |
+
|
28 |
+
logger = logging.getLogger(__name__)
|
29 |
+
|
30 |
+
# The original data can be found in:
|
31 |
+
# http://vis-www.cs.umass.edu/lfw/lfw.tgz
|
32 |
+
ARCHIVE = RemoteFileMetadata(
|
33 |
+
filename="lfw.tgz",
|
34 |
+
url="https://ndownloader.figshare.com/files/5976018",
|
35 |
+
checksum="055f7d9c632d7370e6fb4afc7468d40f970c34a80d4c6f50ffec63f5a8d536c0",
|
36 |
+
)
|
37 |
+
|
38 |
+
# The original funneled data can be found in:
|
39 |
+
# http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz
|
40 |
+
FUNNELED_ARCHIVE = RemoteFileMetadata(
|
41 |
+
filename="lfw-funneled.tgz",
|
42 |
+
url="https://ndownloader.figshare.com/files/5976015",
|
43 |
+
checksum="b47c8422c8cded889dc5a13418c4bc2abbda121092b3533a83306f90d900100a",
|
44 |
+
)
|
45 |
+
|
46 |
+
# The original target data can be found in:
|
47 |
+
# http://vis-www.cs.umass.edu/lfw/pairsDevTrain.txt',
|
48 |
+
# http://vis-www.cs.umass.edu/lfw/pairsDevTest.txt',
|
49 |
+
# http://vis-www.cs.umass.edu/lfw/pairs.txt',
|
50 |
+
TARGETS = (
|
51 |
+
RemoteFileMetadata(
|
52 |
+
filename="pairsDevTrain.txt",
|
53 |
+
url="https://ndownloader.figshare.com/files/5976012",
|
54 |
+
checksum="1d454dada7dfeca0e7eab6f65dc4e97a6312d44cf142207be28d688be92aabfa",
|
55 |
+
),
|
56 |
+
RemoteFileMetadata(
|
57 |
+
filename="pairsDevTest.txt",
|
58 |
+
url="https://ndownloader.figshare.com/files/5976009",
|
59 |
+
checksum="7cb06600ea8b2814ac26e946201cdb304296262aad67d046a16a7ec85d0ff87c",
|
60 |
+
),
|
61 |
+
RemoteFileMetadata(
|
62 |
+
filename="pairs.txt",
|
63 |
+
url="https://ndownloader.figshare.com/files/5976006",
|
64 |
+
checksum="ea42330c62c92989f9d7c03237ed5d591365e89b3e649747777b70e692dc1592",
|
65 |
+
),
|
66 |
+
)
|
67 |
+
|
68 |
+
|
69 |
+
#
|
70 |
+
# Common private utilities for data fetching from the original LFW website
|
71 |
+
# local disk caching, and image decoding.
|
72 |
+
#
|
73 |
+
|
74 |
+
|
75 |
+
def _check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
|
76 |
+
"""Helper function to download any missing LFW data"""
|
77 |
+
|
78 |
+
data_home = get_data_home(data_home=data_home)
|
79 |
+
lfw_home = join(data_home, "lfw_home")
|
80 |
+
|
81 |
+
if not exists(lfw_home):
|
82 |
+
makedirs(lfw_home)
|
83 |
+
|
84 |
+
for target in TARGETS:
|
85 |
+
target_filepath = join(lfw_home, target.filename)
|
86 |
+
if not exists(target_filepath):
|
87 |
+
if download_if_missing:
|
88 |
+
logger.info("Downloading LFW metadata: %s", target.url)
|
89 |
+
_fetch_remote(target, dirname=lfw_home)
|
90 |
+
else:
|
91 |
+
raise OSError("%s is missing" % target_filepath)
|
92 |
+
|
93 |
+
if funneled:
|
94 |
+
data_folder_path = join(lfw_home, "lfw_funneled")
|
95 |
+
archive = FUNNELED_ARCHIVE
|
96 |
+
else:
|
97 |
+
data_folder_path = join(lfw_home, "lfw")
|
98 |
+
archive = ARCHIVE
|
99 |
+
|
100 |
+
if not exists(data_folder_path):
|
101 |
+
archive_path = join(lfw_home, archive.filename)
|
102 |
+
if not exists(archive_path):
|
103 |
+
if download_if_missing:
|
104 |
+
logger.info("Downloading LFW data (~200MB): %s", archive.url)
|
105 |
+
_fetch_remote(archive, dirname=lfw_home)
|
106 |
+
else:
|
107 |
+
raise OSError("%s is missing" % archive_path)
|
108 |
+
|
109 |
+
import tarfile
|
110 |
+
|
111 |
+
logger.debug("Decompressing the data archive to %s", data_folder_path)
|
112 |
+
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
|
113 |
+
remove(archive_path)
|
114 |
+
|
115 |
+
return lfw_home, data_folder_path
|
116 |
+
|
117 |
+
|
118 |
+
def _load_imgs(file_paths, slice_, color, resize):
|
119 |
+
"""Internally used to load images"""
|
120 |
+
try:
|
121 |
+
from PIL import Image
|
122 |
+
except ImportError:
|
123 |
+
raise ImportError(
|
124 |
+
"The Python Imaging Library (PIL) is required to load data "
|
125 |
+
"from jpeg files. Please refer to "
|
126 |
+
"https://pillow.readthedocs.io/en/stable/installation.html "
|
127 |
+
"for installing PIL."
|
128 |
+
)
|
129 |
+
|
130 |
+
# compute the portion of the images to load to respect the slice_ parameter
|
131 |
+
# given by the caller
|
132 |
+
default_slice = (slice(0, 250), slice(0, 250))
|
133 |
+
if slice_ is None:
|
134 |
+
slice_ = default_slice
|
135 |
+
else:
|
136 |
+
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
|
137 |
+
|
138 |
+
h_slice, w_slice = slice_
|
139 |
+
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
|
140 |
+
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
|
141 |
+
|
142 |
+
if resize is not None:
|
143 |
+
resize = float(resize)
|
144 |
+
h = int(resize * h)
|
145 |
+
w = int(resize * w)
|
146 |
+
|
147 |
+
# allocate some contiguous memory to host the decoded image slices
|
148 |
+
n_faces = len(file_paths)
|
149 |
+
if not color:
|
150 |
+
faces = np.zeros((n_faces, h, w), dtype=np.float32)
|
151 |
+
else:
|
152 |
+
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
|
153 |
+
|
154 |
+
# iterate over the collected file path to load the jpeg files as numpy
|
155 |
+
# arrays
|
156 |
+
for i, file_path in enumerate(file_paths):
|
157 |
+
if i % 1000 == 0:
|
158 |
+
logger.debug("Loading face #%05d / %05d", i + 1, n_faces)
|
159 |
+
|
160 |
+
# Checks if jpeg reading worked. Refer to issue #3594 for more
|
161 |
+
# details.
|
162 |
+
pil_img = Image.open(file_path)
|
163 |
+
pil_img = pil_img.crop(
|
164 |
+
(w_slice.start, h_slice.start, w_slice.stop, h_slice.stop)
|
165 |
+
)
|
166 |
+
if resize is not None:
|
167 |
+
pil_img = pil_img.resize((w, h))
|
168 |
+
face = np.asarray(pil_img, dtype=np.float32)
|
169 |
+
|
170 |
+
if face.ndim == 0:
|
171 |
+
raise RuntimeError(
|
172 |
+
"Failed to read the image file %s, "
|
173 |
+
"Please make sure that libjpeg is installed" % file_path
|
174 |
+
)
|
175 |
+
|
176 |
+
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
|
177 |
+
if not color:
|
178 |
+
# average the color channels to compute a gray levels
|
179 |
+
# representation
|
180 |
+
face = face.mean(axis=2)
|
181 |
+
|
182 |
+
faces[i, ...] = face
|
183 |
+
|
184 |
+
return faces
|
185 |
+
|
186 |
+
|
187 |
+
#
|
188 |
+
# Task #1: Face Identification on picture with names
|
189 |
+
#
|
190 |
+
|
191 |
+
|
192 |
+
def _fetch_lfw_people(
|
193 |
+
data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0
|
194 |
+
):
|
195 |
+
"""Perform the actual data loading for the lfw people dataset
|
196 |
+
|
197 |
+
This operation is meant to be cached by a joblib wrapper.
|
198 |
+
"""
|
199 |
+
# scan the data folder content to retain people with more that
|
200 |
+
# `min_faces_per_person` face pictures
|
201 |
+
person_names, file_paths = [], []
|
202 |
+
for person_name in sorted(listdir(data_folder_path)):
|
203 |
+
folder_path = join(data_folder_path, person_name)
|
204 |
+
if not isdir(folder_path):
|
205 |
+
continue
|
206 |
+
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
|
207 |
+
n_pictures = len(paths)
|
208 |
+
if n_pictures >= min_faces_per_person:
|
209 |
+
person_name = person_name.replace("_", " ")
|
210 |
+
person_names.extend([person_name] * n_pictures)
|
211 |
+
file_paths.extend(paths)
|
212 |
+
|
213 |
+
n_faces = len(file_paths)
|
214 |
+
if n_faces == 0:
|
215 |
+
raise ValueError(
|
216 |
+
"min_faces_per_person=%d is too restrictive" % min_faces_per_person
|
217 |
+
)
|
218 |
+
|
219 |
+
target_names = np.unique(person_names)
|
220 |
+
target = np.searchsorted(target_names, person_names)
|
221 |
+
|
222 |
+
faces = _load_imgs(file_paths, slice_, color, resize)
|
223 |
+
|
224 |
+
# shuffle the faces with a deterministic RNG scheme to avoid having
|
225 |
+
# all faces of the same person in a row, as it would break some
|
226 |
+
# cross validation and learning algorithms such as SGD and online
|
227 |
+
# k-means that make an IID assumption
|
228 |
+
|
229 |
+
indices = np.arange(n_faces)
|
230 |
+
np.random.RandomState(42).shuffle(indices)
|
231 |
+
faces, target = faces[indices], target[indices]
|
232 |
+
return faces, target, target_names
|
233 |
+
|
234 |
+
|
235 |
+
@validate_params(
|
236 |
+
{
|
237 |
+
"data_home": [str, PathLike, None],
|
238 |
+
"funneled": ["boolean"],
|
239 |
+
"resize": [Interval(Real, 0, None, closed="neither"), None],
|
240 |
+
"min_faces_per_person": [Interval(Integral, 0, None, closed="left"), None],
|
241 |
+
"color": ["boolean"],
|
242 |
+
"slice_": [tuple, Hidden(None)],
|
243 |
+
"download_if_missing": ["boolean"],
|
244 |
+
"return_X_y": ["boolean"],
|
245 |
+
},
|
246 |
+
prefer_skip_nested_validation=True,
|
247 |
+
)
|
248 |
+
def fetch_lfw_people(
|
249 |
+
*,
|
250 |
+
data_home=None,
|
251 |
+
funneled=True,
|
252 |
+
resize=0.5,
|
253 |
+
min_faces_per_person=0,
|
254 |
+
color=False,
|
255 |
+
slice_=(slice(70, 195), slice(78, 172)),
|
256 |
+
download_if_missing=True,
|
257 |
+
return_X_y=False,
|
258 |
+
):
|
259 |
+
"""Load the Labeled Faces in the Wild (LFW) people dataset \
|
260 |
+
(classification).
|
261 |
+
|
262 |
+
Download it if necessary.
|
263 |
+
|
264 |
+
================= =======================
|
265 |
+
Classes 5749
|
266 |
+
Samples total 13233
|
267 |
+
Dimensionality 5828
|
268 |
+
Features real, between 0 and 255
|
269 |
+
================= =======================
|
270 |
+
|
271 |
+
Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.
|
272 |
+
|
273 |
+
Parameters
|
274 |
+
----------
|
275 |
+
data_home : str or path-like, default=None
|
276 |
+
Specify another download and cache folder for the datasets. By default
|
277 |
+
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
|
278 |
+
|
279 |
+
funneled : bool, default=True
|
280 |
+
Download and use the funneled variant of the dataset.
|
281 |
+
|
282 |
+
resize : float or None, default=0.5
|
283 |
+
Ratio used to resize the each face picture. If `None`, no resizing is
|
284 |
+
performed.
|
285 |
+
|
286 |
+
min_faces_per_person : int, default=None
|
287 |
+
The extracted dataset will only retain pictures of people that have at
|
288 |
+
least `min_faces_per_person` different pictures.
|
289 |
+
|
290 |
+
color : bool, default=False
|
291 |
+
Keep the 3 RGB channels instead of averaging them to a single
|
292 |
+
gray level channel. If color is True the shape of the data has
|
293 |
+
one more dimension than the shape with color = False.
|
294 |
+
|
295 |
+
slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
|
296 |
+
Provide a custom 2D slice (height, width) to extract the
|
297 |
+
'interesting' part of the jpeg files and avoid use statistical
|
298 |
+
correlation from the background.
|
299 |
+
|
300 |
+
download_if_missing : bool, default=True
|
301 |
+
If False, raise an OSError if the data is not locally available
|
302 |
+
instead of trying to download the data from the source site.
|
303 |
+
|
304 |
+
return_X_y : bool, default=False
|
305 |
+
If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
|
306 |
+
object. See below for more information about the `dataset.data` and
|
307 |
+
`dataset.target` object.
|
308 |
+
|
309 |
+
.. versionadded:: 0.20
|
310 |
+
|
311 |
+
Returns
|
312 |
+
-------
|
313 |
+
dataset : :class:`~sklearn.utils.Bunch`
|
314 |
+
Dictionary-like object, with the following attributes.
|
315 |
+
|
316 |
+
data : numpy array of shape (13233, 2914)
|
317 |
+
Each row corresponds to a ravelled face image
|
318 |
+
of original size 62 x 47 pixels.
|
319 |
+
Changing the ``slice_`` or resize parameters will change the
|
320 |
+
shape of the output.
|
321 |
+
images : numpy array of shape (13233, 62, 47)
|
322 |
+
Each row is a face image corresponding to one of the 5749 people in
|
323 |
+
the dataset. Changing the ``slice_``
|
324 |
+
or resize parameters will change the shape of the output.
|
325 |
+
target : numpy array of shape (13233,)
|
326 |
+
Labels associated to each face image.
|
327 |
+
Those labels range from 0-5748 and correspond to the person IDs.
|
328 |
+
target_names : numpy array of shape (5749,)
|
329 |
+
Names of all persons in the dataset.
|
330 |
+
Position in array corresponds to the person ID in the target array.
|
331 |
+
DESCR : str
|
332 |
+
Description of the Labeled Faces in the Wild (LFW) dataset.
|
333 |
+
|
334 |
+
(data, target) : tuple if ``return_X_y`` is True
|
335 |
+
A tuple of two ndarray. The first containing a 2D array of
|
336 |
+
shape (n_samples, n_features) with each row representing one
|
337 |
+
sample and each column representing the features. The second
|
338 |
+
ndarray of shape (n_samples,) containing the target samples.
|
339 |
+
|
340 |
+
.. versionadded:: 0.20
|
341 |
+
"""
|
342 |
+
lfw_home, data_folder_path = _check_fetch_lfw(
|
343 |
+
data_home=data_home, funneled=funneled, download_if_missing=download_if_missing
|
344 |
+
)
|
345 |
+
logger.debug("Loading LFW people faces from %s", lfw_home)
|
346 |
+
|
347 |
+
# wrap the loader in a memoizing function that will return memmaped data
|
348 |
+
# arrays for optimal memory usage
|
349 |
+
m = Memory(location=lfw_home, compress=6, verbose=0)
|
350 |
+
load_func = m.cache(_fetch_lfw_people)
|
351 |
+
|
352 |
+
# load and memoize the pairs as np arrays
|
353 |
+
faces, target, target_names = load_func(
|
354 |
+
data_folder_path,
|
355 |
+
resize=resize,
|
356 |
+
min_faces_per_person=min_faces_per_person,
|
357 |
+
color=color,
|
358 |
+
slice_=slice_,
|
359 |
+
)
|
360 |
+
|
361 |
+
X = faces.reshape(len(faces), -1)
|
362 |
+
|
363 |
+
fdescr = load_descr("lfw.rst")
|
364 |
+
|
365 |
+
if return_X_y:
|
366 |
+
return X, target
|
367 |
+
|
368 |
+
# pack the results as a Bunch instance
|
369 |
+
return Bunch(
|
370 |
+
data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr
|
371 |
+
)
|
372 |
+
|
373 |
+
|
374 |
+
#
|
375 |
+
# Task #2: Face Verification on pairs of face pictures
|
376 |
+
#
|
377 |
+
|
378 |
+
|
379 |
+
def _fetch_lfw_pairs(
|
380 |
+
index_file_path, data_folder_path, slice_=None, color=False, resize=None
|
381 |
+
):
|
382 |
+
"""Perform the actual data loading for the LFW pairs dataset
|
383 |
+
|
384 |
+
This operation is meant to be cached by a joblib wrapper.
|
385 |
+
"""
|
386 |
+
# parse the index file to find the number of pairs to be able to allocate
|
387 |
+
# the right amount of memory before starting to decode the jpeg files
|
388 |
+
with open(index_file_path, "rb") as index_file:
|
389 |
+
split_lines = [ln.decode().strip().split("\t") for ln in index_file]
|
390 |
+
pair_specs = [sl for sl in split_lines if len(sl) > 2]
|
391 |
+
n_pairs = len(pair_specs)
|
392 |
+
|
393 |
+
# iterating over the metadata lines for each pair to find the filename to
|
394 |
+
# decode and load in memory
|
395 |
+
target = np.zeros(n_pairs, dtype=int)
|
396 |
+
file_paths = list()
|
397 |
+
for i, components in enumerate(pair_specs):
|
398 |
+
if len(components) == 3:
|
399 |
+
target[i] = 1
|
400 |
+
pair = (
|
401 |
+
(components[0], int(components[1]) - 1),
|
402 |
+
(components[0], int(components[2]) - 1),
|
403 |
+
)
|
404 |
+
elif len(components) == 4:
|
405 |
+
target[i] = 0
|
406 |
+
pair = (
|
407 |
+
(components[0], int(components[1]) - 1),
|
408 |
+
(components[2], int(components[3]) - 1),
|
409 |
+
)
|
410 |
+
else:
|
411 |
+
raise ValueError("invalid line %d: %r" % (i + 1, components))
|
412 |
+
for j, (name, idx) in enumerate(pair):
|
413 |
+
try:
|
414 |
+
person_folder = join(data_folder_path, name)
|
415 |
+
except TypeError:
|
416 |
+
person_folder = join(data_folder_path, str(name, "UTF-8"))
|
417 |
+
filenames = list(sorted(listdir(person_folder)))
|
418 |
+
file_path = join(person_folder, filenames[idx])
|
419 |
+
file_paths.append(file_path)
|
420 |
+
|
421 |
+
pairs = _load_imgs(file_paths, slice_, color, resize)
|
422 |
+
shape = list(pairs.shape)
|
423 |
+
n_faces = shape.pop(0)
|
424 |
+
shape.insert(0, 2)
|
425 |
+
shape.insert(0, n_faces // 2)
|
426 |
+
pairs.shape = shape
|
427 |
+
|
428 |
+
return pairs, target, np.array(["Different persons", "Same person"])


@validate_params(
    {
        "subset": [StrOptions({"train", "test", "10_folds"})],
        "data_home": [str, PathLike, None],
        "funneled": ["boolean"],
        "resize": [Interval(Real, 0, None, closed="neither"), None],
        "color": ["boolean"],
        "slice_": [tuple, Hidden(None)],
        "download_if_missing": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_lfw_pairs(
    *,
    subset="train",
    data_home=None,
    funneled=True,
    resize=0.5,
    color=False,
    slice_=(slice(70, 195), slice(78, 172)),
    download_if_missing=True,
):
    """Load the Labeled Faces in the Wild (LFW) pairs dataset (classification).

    Download it if necessary.

    ================= =======================
    Classes                                 2
    Samples total                       13233
    Dimensionality                       5828
    Features          real, between 0 and 255
    ================= =======================

    In the official `README.txt`_ this task is described as the
    "Restricted" task. Because it is unclear how to implement the
    "Unrestricted" variant correctly, that variant is currently
    unsupported.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.

    Parameters
    ----------
    subset : {'train', 'test', '10_folds'}, default='train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-fold
        cross validation.

    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By
        default all scikit-learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : bool, default=True
        Download and use the funneled variant of the dataset.

    resize : float, default=0.5
        Ratio used to resize each face picture.

    color : bool, default=False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True, the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid picking up
        statistical correlations from the background.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : ndarray of shape (2200, 5828). Shape depends on ``subset``.
            Each row corresponds to 2 ravel'd face images
            of original size 62 x 47 pixels.
            Changing the ``slice_``, ``resize`` or ``subset`` parameters
            will change the shape of the output.
        pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset``.
            Each row has 2 face images corresponding
            to the same or to different persons from the dataset
            containing 5749 people. Changing the ``slice_``,
            ``resize`` or ``subset`` parameters will change the shape of the
            output.
        target : numpy array of shape (2200,). Shape depends on ``subset``.
            Labels associated with each pair of images.
            The two label values mean different persons or the same person.
        target_names : numpy array of shape (2,)
            Explains the target values of the target array.
            0 corresponds to "Different persons", 1 corresponds to
            "Same person".
        DESCR : str
            Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = _check_fetch_lfw(
        data_home=data_home, funneled=funneled, download_if_missing=download_if_missing
    )
    logger.debug("Loading %s LFW pairs from %s", subset, lfw_home)

    # wrap the loader in a memoizing function that will return memmapped data
    # arrays for optimal memory usage
    m = Memory(location=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)

    # select the right metadata file according to the requested subset
    label_filenames = {
        "train": "pairsDevTrain.txt",
        "test": "pairsDevTest.txt",
        "10_folds": "pairs.txt",
    }
    if subset not in label_filenames:
        raise ValueError(
            "subset='%s' is invalid: should be one of %r"
            % (subset, list(sorted(label_filenames.keys())))
        )
    index_file_path = join(lfw_home, label_filenames[subset])

    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color, slice_=slice_
    )

    fdescr = load_descr("lfw.rst")

    # pack the results as a Bunch instance
    return Bunch(
        data=pairs.reshape(len(pairs), -1),
        pairs=pairs,
        target=target,
        target_names=target_names,
        DESCR=fdescr,
    )
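
# A minimal usage sketch (illustrative only; the first call downloads the
# archive and caches the decoded arrays under the LFW cache folder):
#
#     from sklearn.datasets import fetch_lfw_pairs
#     lfw_pairs = fetch_lfw_pairs(subset="train")
#     lfw_pairs.pairs.shape         # (2200, 2, 62, 47) with default slice/resize
#     lfw_pairs.target_names        # ['Different persons', 'Same person']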
venv/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py
ADDED
@@ -0,0 +1,306 @@
"""RCV1 dataset.

The dataset page is available at

    http://jmlr.csail.mit.edu/papers/volume5/lewis04a/
"""

# Author: Tom Dupre la Tour
# License: BSD 3 clause

import logging
from gzip import GzipFile
from os import PathLike, makedirs, remove
from os.path import exists, join

import joblib
import numpy as np
import scipy.sparse as sp

from ..utils import Bunch
from ..utils import shuffle as shuffle_
from ..utils._param_validation import StrOptions, validate_params
from . import get_data_home
from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr
from ._svmlight_format_io import load_svmlight_files

# The original vectorized data can be found at:
#    http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt0.dat.gz
#    http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt1.dat.gz
#    http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt2.dat.gz
#    http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt3.dat.gz
#    http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_train.dat.gz
# while the original stemmed token files can be found
# in the README, section B.12.i.:
#    http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm
XY_METADATA = (
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976069",
        checksum="ed40f7e418d10484091b059703eeb95ae3199fe042891dcec4be6696b9968374",
        filename="lyrl2004_vectors_test_pt0.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976066",
        checksum="87700668ae45d45d5ca1ef6ae9bd81ab0f5ec88cc95dcef9ae7838f727a13aa6",
        filename="lyrl2004_vectors_test_pt1.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976063",
        checksum="48143ac703cbe33299f7ae9f4995db49a258690f60e5debbff8995c34841c7f5",
        filename="lyrl2004_vectors_test_pt2.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976060",
        checksum="dfcb0d658311481523c6e6ca0c3f5a3e1d3d12cde5d7a8ce629a9006ec7dbb39",
        filename="lyrl2004_vectors_test_pt3.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976057",
        checksum="5468f656d0ba7a83afc7ad44841cf9a53048a5c083eedc005dcdb5cc768924ae",
        filename="lyrl2004_vectors_train.dat.gz",
    ),
)

# The original data can be found at:
# http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a08-topic-qrels/rcv1-v2.topics.qrels.gz
TOPICS_METADATA = RemoteFileMetadata(
    url="https://ndownloader.figshare.com/files/5976048",
    checksum="2a98e5e5d8b770bded93afc8930d88299474317fe14181aee1466cc754d0d1c1",
    filename="rcv1v2.topics.qrels.gz",
)

logger = logging.getLogger(__name__)


@validate_params(
    {
        "data_home": [str, PathLike, None],
        "subset": [StrOptions({"train", "test", "all"})],
        "download_if_missing": ["boolean"],
        "random_state": ["random_state"],
        "shuffle": ["boolean"],
        "return_X_y": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_rcv1(
    *,
    data_home=None,
    subset="all",
    download_if_missing=True,
    random_state=None,
    shuffle=False,
    return_X_y=False,
):
    """Load the RCV1 multilabel dataset (classification).

    Download it if necessary.

    Version: RCV1-v2, vectors, full sets, topics multilabels.

    ================= =====================
    Classes                             103
    Samples total                    804414
    Dimensionality                    47236
    Features          real, between 0 and 1
    ================= =====================

    Read more in the :ref:`User Guide <rcv1_dataset>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    subset : {'train', 'test', 'all'}, default='all'
        Select the dataset to load: 'train' for the training set
        (23149 samples), 'test' for the test set (781265 samples),
        'all' for both, with the training samples first if shuffle is False.
        This follows the official LYRL2004 chronological split.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    shuffle : bool, default=False
        Whether to shuffle the dataset.

    return_X_y : bool, default=False
        If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
        object. See below for more information about the `dataset.data` and
        `dataset.target` object.

        .. versionadded:: 0.20

    Returns
    -------
    dataset : :class:`~sklearn.utils.Bunch`
        Dictionary-like object. Returned only if `return_X_y` is False.
        `dataset` has the following attributes:

        - data : sparse matrix of shape (804414, 47236), dtype=np.float64
            The array has 0.16% of non zero values. Will be of CSR format.
        - target : sparse matrix of shape (804414, 103), dtype=np.uint8
            Each sample has a value of 1 in its categories, and 0 in others.
            The array has 3.15% of non zero values. Will be of CSR format.
        - sample_id : ndarray of shape (804414,), dtype=np.uint32
            Identification number of each sample, as ordered in dataset.data.
        - target_names : ndarray of shape (103,), dtype=object
            Names of each target (RCV1 topics), as ordered in dataset.target.
        - DESCR : str
            Description of the RCV1 dataset.

    (data, target) : tuple
        A tuple consisting of `dataset.data` and `dataset.target`, as
        described above. Returned only if `return_X_y` is True.

        .. versionadded:: 0.20
    """
    N_SAMPLES = 804414
    N_FEATURES = 47236
    N_CATEGORIES = 103
    N_TRAIN = 23149

    data_home = get_data_home(data_home=data_home)
    rcv1_dir = join(data_home, "RCV1")
    if download_if_missing:
        if not exists(rcv1_dir):
            makedirs(rcv1_dir)

    samples_path = _pkl_filepath(rcv1_dir, "samples.pkl")
    sample_id_path = _pkl_filepath(rcv1_dir, "sample_id.pkl")
    sample_topics_path = _pkl_filepath(rcv1_dir, "sample_topics.pkl")
    topics_path = _pkl_filepath(rcv1_dir, "topics_names.pkl")

    # load data (X) and sample_id
    if download_if_missing and (not exists(samples_path) or not exists(sample_id_path)):
        files = []
        for each in XY_METADATA:
            logger.info("Downloading %s" % each.url)
            file_path = _fetch_remote(each, dirname=rcv1_dir)
            files.append(GzipFile(filename=file_path))

        Xy = load_svmlight_files(files, n_features=N_FEATURES)

        # Training data is before testing data
        X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr()
        sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7]))
        sample_id = sample_id.astype(np.uint32, copy=False)

        joblib.dump(X, samples_path, compress=9)
        joblib.dump(sample_id, sample_id_path, compress=9)

        # delete archives
        for f in files:
            f.close()
            remove(f.name)
    else:
        X = joblib.load(samples_path)
        sample_id = joblib.load(sample_id_path)

    # load target (y), categories, and sample_id_bis
    if download_if_missing and (
        not exists(sample_topics_path) or not exists(topics_path)
    ):
        logger.info("Downloading %s" % TOPICS_METADATA.url)
        topics_archive_path = _fetch_remote(TOPICS_METADATA, dirname=rcv1_dir)

        # parse the target file
        n_cat = -1
        n_doc = -1
        doc_previous = -1
        y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8)
        sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32)
        category_names = {}
        with GzipFile(filename=topics_archive_path, mode="rb") as f:
            for line in f:
                line_components = line.decode("ascii").split(" ")
                if len(line_components) == 3:
                    cat, doc, _ = line_components
                    if cat not in category_names:
                        n_cat += 1
                        category_names[cat] = n_cat

                    doc = int(doc)
                    if doc != doc_previous:
                        doc_previous = doc
                        n_doc += 1
                        sample_id_bis[n_doc] = doc
                    y[n_doc, category_names[cat]] = 1

        # delete archive
        remove(topics_archive_path)

        # Samples in X are ordered with sample_id,
        # whereas in y, they are ordered with sample_id_bis.
        permutation = _find_permutation(sample_id_bis, sample_id)
        y = y[permutation, :]

        # save category names in a list, in the same order as in y
        categories = np.empty(N_CATEGORIES, dtype=object)
        for k in category_names.keys():
            categories[category_names[k]] = k

        # reorder categories in lexicographic order
        order = np.argsort(categories)
        categories = categories[order]
        y = sp.csr_matrix(y[:, order])

        joblib.dump(y, sample_topics_path, compress=9)
        joblib.dump(categories, topics_path, compress=9)
    else:
        y = joblib.load(sample_topics_path)
        categories = joblib.load(topics_path)

    if subset == "all":
        pass
    elif subset == "train":
        X = X[:N_TRAIN, :]
        y = y[:N_TRAIN, :]
        sample_id = sample_id[:N_TRAIN]
    elif subset == "test":
        X = X[N_TRAIN:, :]
        y = y[N_TRAIN:, :]
        sample_id = sample_id[N_TRAIN:]
    else:
        raise ValueError(
            "Unknown subset parameter. Got '%s' instead of one"
            " of ('all', 'train', 'test')" % subset
        )

    if shuffle:
        X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state)

    fdescr = load_descr("rcv1.rst")

    if return_X_y:
        return X, y

    return Bunch(
        data=X, target=y, sample_id=sample_id, target_names=categories, DESCR=fdescr
    )
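
# A minimal usage sketch (illustrative only; the first call downloads the
# vectorized archives and caches them as compressed pickles):
#
#     from sklearn.datasets import fetch_rcv1
#     rcv1 = fetch_rcv1(subset="train")
#     rcv1.data.shape               # (23149, 47236), CSR matrix, features in [0, 1]
#     rcv1.target.shape             # (23149, 103), binary topic indicators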


def _inverse_permutation(p):
    """Inverse permutation p."""
    n = p.size
    s = np.zeros(n, dtype=np.int32)
    i = np.arange(n, dtype=np.int32)
    np.put(s, p, i)  # s[p] = i
    return s


def _find_permutation(a, b):
    """Find the permutation from a to b."""
    t = np.argsort(a)
    u = np.argsort(b)
    u_ = _inverse_permutation(u)
    return t[u_]
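
# A small check of the helpers above (illustrative): for arrays holding the
# same elements, ``p = _find_permutation(a, b)`` satisfies ``a[p] == b``,
# which is exactly how ``y`` is realigned from sample_id_bis order to
# sample_id order in fetch_rcv1:
#
#     a = np.array([30, 10, 20])
#     b = np.array([10, 20, 30])
#     _find_permutation(a, b)       # array([1, 2, 0]); a[[1, 2, 0]] == b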
venv/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py
ADDED
@@ -0,0 +1,2284 @@
"""
Generate samples of synthetic data sets.
"""

# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
#          G. Louppe, J. Nothman
# License: BSD 3 clause

import array
import numbers
import warnings
from collections.abc import Iterable
from numbers import Integral, Real

import numpy as np
import scipy.sparse as sp
from scipy import linalg

from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
from ..utils.random import sample_without_replacement


def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions."""
    if dimensions > 30:
        return np.hstack(
            [
                rng.randint(2, size=(samples, dimensions - 30)),
                _generate_hypercube(samples, 30, rng),
            ]
        )
    out = sample_without_replacement(2**dimensions, samples, random_state=rng).astype(
        dtype=">u4", copy=False
    )
    out = np.unpackbits(out.view(">u1")).reshape((-1, 32))[:, -dimensions:]
    return out
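
# A quick illustration (assuming a seeded RandomState): the helper returns
# ``samples`` distinct rows drawn from the 2**dimensions binary vertices of
# the hypercube, e.g.
#
#     rng = np.random.RandomState(0)
#     _generate_hypercube(4, 3, rng)    # 4 distinct rows from the 8 vertices of the 3-cube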


@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_informative": [Interval(Integral, 1, None, closed="left")],
        "n_redundant": [Interval(Integral, 0, None, closed="left")],
        "n_repeated": [Interval(Integral, 0, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "n_clusters_per_class": [Interval(Integral, 1, None, closed="left")],
        "weights": ["array-like", None],
        "flip_y": [Interval(Real, 0, 1, closed="both")],
        "class_sep": [Interval(Real, 0, None, closed="neither")],
        "hypercube": ["boolean"],
        "shift": [Interval(Real, None, None, closed="neither"), "array-like", None],
        "scale": [Interval(Real, 0, None, closed="neither"), "array-like", None],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_classification(
    n_samples=100,
    n_features=20,
    *,
    n_informative=2,
    n_redundant=2,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=2,
    weights=None,
    flip_y=0.01,
    class_sep=1.0,
    hypercube=True,
    shift=0.0,
    scale=1.0,
    shuffle=True,
    random_state=None,
):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an ``n_informative``-dimensional hypercube with sides of
    length ``2*class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary ``n_informative`` features, followed by ``n_redundant``
    linear combinations of the informative features, followed by ``n_repeated``
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=20
        The total number of features. These comprise ``n_informative``
        informative features, ``n_redundant`` redundant features,
        ``n_repeated`` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.

    n_informative : int, default=2
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension ``n_informative``. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, default=2
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, default=0
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, default=2
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, default=2
        The number of clusters per class.

    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
            default=None
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than ``n_samples`` samples may be returned if the sum of
        ``weights`` exceeds 1. Note that the actual class proportions will
        not exactly match ``weights`` when ``flip_y`` isn't 0.

    flip_y : float, default=0.01
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder. Note that the default setting flip_y > 0 may lead
        to fewer than ``n_classes`` distinct values in y in some cases.

    class_sep : float, default=1.0
        The factor multiplying the hypercube size. Larger values spread
        out the clusters/classes and make the classification task easier.

    hypercube : bool, default=True
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, ndarray of shape (n_features,) or None, default=0.0
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, ndarray of shape (n_features,) or None, default=1.0
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : bool, default=True
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for class membership of each sample.

    See Also
    --------
    make_blobs : Simplified variant.
    make_multilabel_classification : Unrelated generator for multilabel tasks.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(random_state=42)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [0, 0, 1, 1, 0]
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError(
            "Number of informative, redundant and repeated "
            "features must sum to at most the total number"
            " of features"
        )
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller than or equal to 2**n_informative({})={}"
        raise ValueError(
            msg.format(
                n_classes, n_clusters_per_class, n_informative, 2**n_informative
            )
        )

    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError(
                "Weights specified but incompatible with number of classes."
            )
        if len(weights) == n_classes - 1:
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)
    ]

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(
        float, copy=False
    )
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.uniform(size=(n_clusters, 1))
        centroids *= generator.uniform(size=(1, n_informative))

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative))

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1
        X[:, n_informative : n_informative + n_redundant] = np.dot(
            X[:, :n_informative], B
        )

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp)
        X[:, n : n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.standard_normal(size=(n_samples, n_useless))

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.uniform(size=n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.uniform(size=n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.uniform(size=n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
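
# A minimal sketch of the column layout (illustrative; with shuffle=False the
# useful columns come first, as documented above):
#
#     X, y = make_classification(
#         n_samples=200, n_features=10, n_informative=3, n_redundant=2,
#         n_repeated=0, shuffle=False, random_state=0,
#     )
#     # columns 0..2 are informative, 3..4 redundant, 5..9 pure noise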


@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "n_labels": [Interval(Integral, 0, None, closed="left")],
        "length": [Interval(Integral, 1, None, closed="left")],
        "allow_unlabeled": ["boolean"],
        "sparse": ["boolean"],
        "return_indicator": [StrOptions({"dense", "sparse"}), "boolean"],
        "return_distributions": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_multilabel_classification(
    n_samples=100,
    n_features=20,
    *,
    n_classes=5,
    n_labels=2,
    length=50,
    allow_unlabeled=True,
    sparse=False,
    return_indicator="dense",
    return_distributions=False,
    random_state=None,
):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=20
        The total number of features.

    n_classes : int, default=5
        The number of classes of the classification problem.

    n_labels : int, default=2
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.

    length : int, default=50
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.

    allow_unlabeled : bool, default=True
        If ``True``, some instances might not belong to any class.

    sparse : bool, default=False
        If ``True``, return a sparse feature matrix.

        .. versionadded:: 0.17
           parameter to allow *sparse* output.

    return_indicator : {'dense', 'sparse'} or False, default='dense'
        If ``'dense'`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.

    return_distributions : bool, default=False
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        The label sets. Sparse matrix should be of CSR format.

    p_c : ndarray of shape (n_classes,)
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.

    p_w_c : ndarray of shape (n_features, n_classes)
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.

    Examples
    --------
    >>> from sklearn.datasets import make_multilabel_classification
    >>> X, y = make_multilabel_classification(n_labels=3, random_state=42)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100, 5)
    >>> list(y[:3])
    [array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])]
    """

    generator = check_random_state(random_state)
    p_c = generator.uniform(size=n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    p_w_c = generator.uniform(size=(n_features, n_classes))
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)

        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c, generator.uniform(size=y_size - len(y)))
            y.update(c)
        y = list(y)

        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)

        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y

        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.uniform(size=n_words))
        return words, y

    X_indices = array.array("i")
    X_indptr = array.array("i", [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features))
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()

    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, "sparse", "dense"):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == "sparse"))
        Y = lb.fit([range(n_classes)]).transform(Y)
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
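
# A minimal usage sketch (illustrative only):
#
#     X, Y, p_c, p_w_c = make_multilabel_classification(
#         n_samples=50, n_features=10, n_classes=4,
#         return_distributions=True, random_state=0,
#     )
#     # Y.sum(axis=1) counts labels per sample; its mean is roughly n_labels
#     # (the default 2, modulo the rejection-sampling bounds), and p_c sums to 1.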


@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_hastie_10_2(n_samples=12000, *, random_state=None):
    """Generate data for binary classification used in Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::

        y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=12000
        The number of samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 10)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    See Also
    --------
    make_gaussian_quantiles : A generalization of this dataset approach.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.
    """
    rs = check_random_state(random_state)

    shape = (n_samples, 10)
    X = rs.normal(size=shape).reshape(shape)
    y = ((X**2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False)
    y[y == 0.0] = -1.0

    return X, y
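
# A small sanity check (illustrative): 9.34 is approximately the median of a
# chi-squared variable with 10 degrees of freedom, so the two classes come
# out roughly balanced:
#
#     X, y = make_hastie_10_2(n_samples=10000, random_state=0)
#     # np.mean(y == 1) is close to 0.5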


@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_informative": [Interval(Integral, 0, None, closed="left")],
        "n_targets": [Interval(Integral, 1, None, closed="left")],
        "bias": [Interval(Real, None, None, closed="neither")],
        "effective_rank": [Interval(Integral, 1, None, closed="left"), None],
        "tail_strength": [Interval(Real, 0, 1, closed="both")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "shuffle": ["boolean"],
        "coef": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_regression(
    n_samples=100,
    n_features=100,
    *,
    n_informative=10,
    n_targets=1,
    bias=0.0,
    effective_rank=None,
    tail_strength=0.5,
    noise=0.0,
    shuffle=True,
    coef=False,
    random_state=None,
):
    """Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
    more details.

    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=100
        The number of features.

    n_informative : int, default=10
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.

    n_targets : int, default=1
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.

    bias : float, default=0.0
        The bias term in the underlying linear model.

    effective_rank : int, default=None
        If not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        If None:
            The input set is well conditioned, centered and gaussian with
            unit variance.

    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None. When a float, it should be
        between 0 and 1.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    shuffle : bool, default=True
        Shuffle the samples and the features.

    coef : bool, default=False
        If True, the coefficients of the underlying linear model are returned.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,) or (n_samples, n_targets)
        The output values.

    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        The coefficient of the underlying linear model. It is returned only if
        coef is True.

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42)
    >>> X
    array([[ 0.4967..., -0.1382...],
           [ 0.6476...,  1.523...],
           [-0.2341..., -0.2341...],
           [-0.4694...,  0.5425...],
           [ 1.579...,  0.7674...]])
    >>> y
    array([ 6.737..., 37.79..., -10.27..., 0.4017..., 42.22...])
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Randomly generate a well conditioned input set
        X = generator.standard_normal(size=(n_samples, n_features))

    else:
        # Randomly generate a low rank, fat tail input set
        X = make_low_rank_matrix(
            n_samples=n_samples,
            n_features=n_features,
            effective_rank=effective_rank,
            tail_strength=tail_strength,
            random_state=generator,
        )

    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by sparsifying regularizers such as L1 or elastic net)
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * generator.uniform(
        size=(n_informative, n_targets)
    )

    y = np.dot(X, ground_truth) + bias

    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        ground_truth = ground_truth[indices]

    y = np.squeeze(y)

    if coef:
        return X, y, np.squeeze(ground_truth)

    else:
        return X, y
|
726 |
+
|
727 |
+
|
728 |
+
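# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): with `coef=True` the ground-truth coefficients are
# returned, so an ordinary least-squares fit on noiseless data should recover
# them almost exactly. All parameter values and names below are arbitrary
# demonstration choices.
import numpy as np

from sklearn.datasets import make_regression

X_demo, y_demo, w_true = make_regression(
    n_samples=200, n_features=5, n_informative=3, coef=True, random_state=0
)
w_hat, *_ = np.linalg.lstsq(X_demo, y_demo, rcond=None)
assert np.allclose(w_hat, w_true)  # noiseless data: exact recovery
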
@validate_params(
    {
        "n_samples": [Interval(Integral, 0, None, closed="left"), tuple],
        "shuffle": ["boolean"],
        "noise": [Interval(Real, 0, None, closed="left"), None],
        "random_state": ["random_state"],
        "factor": [Interval(Real, 0, 1, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def make_circles(
    n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8
):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, it is the total number of points generated.
        For odd numbers, the inner circle will have one point more than the
        outer circle.
        If two-element tuple, number of points in outer circle and inner
        circle.

        .. versionchanged:: 0.23
           Added two-element tuple.

    shuffle : bool, default=True
        Whether to shuffle the samples.

    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    factor : float, default=.8
        Scale factor between inner and outer circle in the range `[0, 1)`.

    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets import make_circles
    >>> X, y = make_circles(random_state=42)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [1, 1, 1, 0, 0]
    """
    if isinstance(n_samples, numbers.Integral):
        n_samples_out = n_samples // 2
        n_samples_in = n_samples - n_samples_out
    else:  # n_samples is a tuple
        if len(n_samples) != 2:
            raise ValueError("When a tuple, n_samples must have exactly two elements.")
        n_samples_out, n_samples_in = n_samples

    generator = check_random_state(random_state)
    # so as not to have the first point = last point, we set endpoint=False
    linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
    linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
    outer_circ_x = np.cos(linspace_out)
    outer_circ_y = np.sin(linspace_out)
    inner_circ_x = np.cos(linspace_in) * factor
    inner_circ_y = np.sin(linspace_in) * factor

    X = np.vstack(
        [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]
    ).T
    y = np.hstack(
        [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]
    )
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left"), tuple],
        "shuffle": ["boolean"],
        "noise": [Interval(Real, 0, None, closed="left"), None],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.

    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, the total number of points generated.
        If two-element tuple, number of points in each of two moons.

        .. versionchanged:: 0.23
           Added two-element tuple.

    shuffle : bool, default=True
        Whether to shuffle the samples.

    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.
    """

    if isinstance(n_samples, numbers.Integral):
        n_samples_out = n_samples // 2
        n_samples_in = n_samples - n_samples_out
    else:
        try:
            n_samples_out, n_samples_in = n_samples
        except ValueError as e:
            raise ValueError(
                "`n_samples` can be either an int or a two-element tuple."
            ) from e

    generator = check_random_state(random_state)

    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5

    X = np.vstack(
        [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]
    ).T
    y = np.hstack(
        [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]
    )

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y

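# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): make_moons with an uneven split between the two half
# circles. The sample counts and noise level are arbitrary demonstration
# choices.
from sklearn.datasets import make_moons

X_demo, y_demo = make_moons(n_samples=(60, 40), noise=0.1, random_state=0)
assert X_demo.shape == (100, 2)
assert (y_demo == 1).sum() == 40  # the second tuple entry sizes the inner moon
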
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left"), "array-like"],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "centers": [Interval(Integral, 1, None, closed="left"), "array-like", None],
        "cluster_std": [Interval(Real, 0, None, closed="left"), "array-like"],
        "center_box": [tuple],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
        "return_centers": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_blobs(
    n_samples=100,
    n_features=2,
    *,
    centers=None,
    cluster_std=1.0,
    center_box=(-10.0, 10.0),
    shuffle=True,
    random_state=None,
    return_centers=False,
):
    """Generate isotropic Gaussian blobs for clustering.

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or array-like, default=100
        If int, it is the total number of points equally divided among
        clusters.
        If array-like, each element of the sequence indicates
        the number of samples per cluster.

        .. versionchanged:: v0.20
           One can now pass an array-like to the ``n_samples`` parameter.

    n_features : int, default=2
        The number of features for each sample.

    centers : int or array-like of shape (n_centers, n_features), default=None
        The number of centers to generate, or the fixed center locations.
        If n_samples is an int and centers is None, 3 centers are generated.
        If n_samples is array-like, centers must be
        either None or an array of length equal to the length of n_samples.

    cluster_std : float or array-like of float, default=1.0
        The standard deviation of the clusters.

    center_box : tuple of float (min, max), default=(-10.0, 10.0)
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    return_centers : bool, default=False
        If True, then return the centers of each cluster.

        .. versionadded:: 0.23

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for cluster membership of each sample.

    centers : ndarray of shape (n_centers, n_features)
        The centers of each cluster. Only returned if
        ``return_centers=True``.

    See Also
    --------
    make_classification : A more intricate variant.

    Examples
    --------
    >>> from sklearn.datasets import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
    """
    generator = check_random_state(random_state)

    if isinstance(n_samples, numbers.Integral):
        # Set n_centers by looking at centers arg
        if centers is None:
            centers = 3

        if isinstance(centers, numbers.Integral):
            n_centers = centers
            centers = generator.uniform(
                center_box[0], center_box[1], size=(n_centers, n_features)
            )

        else:
            centers = check_array(centers)
            n_features = centers.shape[1]
            n_centers = centers.shape[0]

    else:
        # Set n_centers by looking at n_samples arg
        n_centers = len(n_samples)
        if centers is None:
            centers = generator.uniform(
                center_box[0], center_box[1], size=(n_centers, n_features)
            )
        if not isinstance(centers, Iterable):
            raise ValueError(
                "Parameter `centers` must be array-like. Got {!r} instead".format(
                    centers
                )
            )
        if len(centers) != n_centers:
            raise ValueError(
                "Length of `n_samples` not consistent with number of "
                f"centers. Got n_samples = {n_samples} and centers = {centers}"
            )
        centers = check_array(centers)
        n_features = centers.shape[1]

    # stds: if cluster_std is given as list, it must be consistent
    # with the n_centers
    if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers:
        raise ValueError(
            "Length of `cluster_std` not consistent with "
            "number of centers. Got centers = {} "
            "and cluster_std = {}".format(centers, cluster_std)
        )

    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.full(len(centers), cluster_std)

    if isinstance(n_samples, Iterable):
        n_samples_per_center = n_samples
    else:
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers

        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1

    cum_sum_n_samples = np.cumsum(n_samples_per_center)
    X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64)
    y = np.empty(shape=(sum(n_samples_per_center),), dtype=int)

    for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
        start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0
        end_idx = cum_sum_n_samples[i]
        X[start_idx:end_idx] = generator.normal(
            loc=centers[i], scale=std, size=(n, n_features)
        )
        y[start_idx:end_idx] = i

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if return_centers:
        return X, y, centers

    else:
        return X, y

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 5, None, closed="left")],
        "noise": [Interval(Real, 0.0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only 5 are actually used to compute
    `y`. The remaining features are independent of `y`.

    The number of features has to be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=10
        The number of features. Should be at least 5.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> X, y = make_friedman1(random_state=42)
    >>> X.shape
    (100, 10)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [16.8..., 5.8..., 9.4...]
    """
    generator = check_random_state(random_state)

    X = generator.uniform(size=(n_samples, n_features))
    y = (
        10 * np.sin(np.pi * X[:, 0] * X[:, 1])
        + 20 * (X[:, 2] - 0.5) ** 2
        + 10 * X[:, 3]
        + 5 * X[:, 4]
        + noise * generator.standard_normal(size=(n_samples))
    )

    return X, y

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman2(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
               - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> X, y = make_friedman2(random_state=42)
    >>> X.shape
    (100, 4)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [1229.4..., 27.0..., 65.6...]
    """
    generator = check_random_state(random_state)

    X = generator.uniform(size=(n_samples, 4))
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    y = (
        X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2
    ) ** 0.5 + noise * generator.standard_normal(size=(n_samples))

    return X, y

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman3(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman3
    >>> X, y = make_friedman3(random_state=42)
    >>> X.shape
    (100, 4)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [1.5..., 0.9..., 0.4...]
    """
    generator = check_random_state(random_state)

    X = generator.uniform(size=(n_samples, 4))
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    y = np.arctan(
        (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    ) + noise * generator.standard_normal(size=(n_samples))

    return X, y

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "effective_rank": [Interval(Integral, 1, None, closed="left")],
        "tail_strength": [Interval(Real, 0, 1, closed="both")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_low_rank_matrix(
    n_samples=100,
    n_features=100,
    *,
    effective_rank=10,
    tail_strength=0.5,
    random_state=None,
):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profile is often seen in practice, for instance:

    - gray level pictures of faces
    - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=100
        The number of features.

    effective_rank : int, default=10
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile. The value should be between 0 and 1.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random (orthonormal) vectors
    u, _ = linalg.qr(
        generator.standard_normal(size=(n_samples, n)),
        mode="economic",
        check_finite=False,
    )
    v, _ = linalg.qr(
        generator.standard_normal(size=(n_features, n)),
        mode="economic",
        check_finite=False,
    )

    # Index of the singular values
    singular_ind = np.arange(n, dtype=np.float64)

    # Build the singular profile by assembling signal and noise components
    low_rank = (1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = np.identity(n) * (low_rank + tail)

    return np.dot(np.dot(u, s), v.T)

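# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): with a weak tail, only roughly `effective_rank`
# singular components should carry the bulk of the spectrum's energy. The
# parameter values below are arbitrary demonstration choices.
import numpy as np

from sklearn.datasets import make_low_rank_matrix

M = make_low_rank_matrix(
    n_samples=200, n_features=100, effective_rank=10, tail_strength=0.01,
    random_state=0,
)
s = np.linalg.svd(M, compute_uv=False)
# The top 20 singular values (2x the effective rank) dominate the energy.
assert (s[:20] ** 2).sum() / (s**2).sum() > 0.9
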
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
        "data_transposed": ["boolean", Hidden(StrOptions({"deprecated"}))],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_coded_signal(
    n_samples,
    *,
    n_components,
    n_features,
    n_nonzero_coefs,
    random_state=None,
    data_transposed="deprecated",
):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix `Y = DX`, such that `D` is of shape `(n_features, n_components)`,
    `X` is of shape `(n_components, n_samples)` and each column of `X` has exactly
    `n_nonzero_coefs` non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        Number of samples to generate.

    n_components : int
        Number of components in the dictionary.

    n_features : int
        Number of features of the dataset to generate.

    n_nonzero_coefs : int
        Number of active (non-zero) coefficients in each sample.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    data_transposed : bool, default=False
        By default, Y, D and X are not transposed.

        .. versionadded:: 1.1

        .. versionchanged:: 1.3
            Default value changed from True to False.

        .. deprecated:: 1.3
            `data_transposed` is deprecated and will be removed in 1.5.

    Returns
    -------
    data : ndarray of shape (n_features, n_samples) or (n_samples, n_features)
        The encoded signal (Y). The shape is `(n_samples, n_features)` if
        `data_transposed` is False, otherwise it's `(n_features, n_samples)`.

    dictionary : ndarray of shape (n_features, n_components) or \
            (n_components, n_features)
        The dictionary with normalized components (D). The shape is
        `(n_components, n_features)` if `data_transposed` is False, otherwise it's
        `(n_features, n_components)`.

    code : ndarray of shape (n_components, n_samples) or (n_samples, n_components)
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X). The shape is `(n_samples, n_components)`
        if `data_transposed` is False, otherwise it's `(n_components, n_samples)`.
    """
    generator = check_random_state(random_state)

    # generate dictionary
    D = generator.standard_normal(size=(n_features, n_components))
    D /= np.sqrt(np.sum((D**2), axis=0))

    # generate code
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.standard_normal(size=n_nonzero_coefs)

    # encode signal
    Y = np.dot(D, X)

    # TODO(1.5) remove data_transposed
    # warn only if data_transposed was passed explicitly
    if data_transposed != "deprecated":
        warnings.warn(
            "data_transposed was deprecated in version 1.3 and will be removed in 1.5.",
            FutureWarning,
        )
    else:
        data_transposed = False

    # transpose if needed
    if not data_transposed:
        Y, D, X = Y.T, D.T, X.T

    return map(np.squeeze, (Y, D, X))

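# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): with the default (non-transposed) layout, each row of
# the returned code has exactly `n_nonzero_coefs` non-zero entries and the
# signal factorizes as Y = X @ D. Parameter values are arbitrary choices.
import numpy as np

from sklearn.datasets import make_sparse_coded_signal

Y, D, X = make_sparse_coded_signal(
    n_samples=20, n_components=15, n_features=10, n_nonzero_coefs=3, random_state=0
)
assert Y.shape == (20, 10) and D.shape == (15, 10) and X.shape == (20, 15)
assert np.all((X != 0).sum(axis=1) == 3)  # row-wise sparsity
assert np.allclose(Y, X @ D)  # exact factorization
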
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_uncorrelated(n_samples=100, n_features=10, *, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    This dataset is described in Celeux et al. [1] as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=10
        The number of features.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    generator = check_random_state(random_state)

    X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
    y = generator.normal(
        loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]),
        scale=np.ones(n_samples),
    )

    return X, y

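# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): only the first four features carry signal, so with
# enough samples their least-squares coefficients land near (1, 2, -2, -1.5)
# and the rest near zero. Sample size and tolerance are arbitrary choices
# that hold comfortably for this fixed seed.
import numpy as np

from sklearn.datasets import make_sparse_uncorrelated

X_demo, y_demo = make_sparse_uncorrelated(n_samples=5000, random_state=0)
w_hat, *_ = np.linalg.lstsq(X_demo, y_demo, rcond=None)
assert np.allclose(w_hat[:4], [1, 2, -2, -1.5], atol=0.1)
assert np.allclose(w_hat[4:], 0, atol=0.1)  # uninformative features
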
@validate_params(
    {
        "n_dim": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_spd_matrix(n_dim, *, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_dim, n_dim)
        The random symmetric, positive-definite matrix.

    See Also
    --------
    make_sparse_spd_matrix: Generate a sparse symmetric definite positive matrix.

    Examples
    --------
    >>> from sklearn.datasets import make_spd_matrix
    >>> make_spd_matrix(n_dim=2, random_state=42)
    array([[2.09..., 0.34...],
           [0.34..., 0.21...]])
    """
    generator = check_random_state(random_state)

    A = generator.uniform(size=(n_dim, n_dim))
    U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False)
    X = np.dot(np.dot(U, 1.0 + np.diag(generator.uniform(size=n_dim))), Vt)

    return X

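# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): a generated matrix should be symmetric with strictly
# positive eigenvalues, which is easy to verify directly.
import numpy as np

from sklearn.datasets import make_spd_matrix

A = make_spd_matrix(n_dim=5, random_state=0)
assert np.allclose(A, A.T)  # symmetric up to floating-point error
assert np.all(np.linalg.eigvalsh(A) > 0)  # positive definite
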
@validate_params(
    {
        "n_dim": [Hidden(None), Interval(Integral, 1, None, closed="left")],
        "alpha": [Interval(Real, 0, 1, closed="both")],
        "norm_diag": ["boolean"],
        "smallest_coef": [Interval(Real, 0, 1, closed="both")],
        "largest_coef": [Interval(Real, 0, 1, closed="both")],
        "sparse_format": [
            StrOptions({"bsr", "coo", "csc", "csr", "dia", "dok", "lil"}),
            None,
        ],
        "random_state": ["random_state"],
        "dim": [
            Interval(Integral, 1, None, closed="left"),
            Hidden(StrOptions({"deprecated"})),
        ],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_spd_matrix(
    n_dim=None,
    *,
    alpha=0.95,
    norm_diag=False,
    smallest_coef=0.1,
    largest_coef=0.9,
    sparse_format=None,
    random_state=None,
    dim="deprecated",
):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int, default=1
        The size of the random matrix to generate.

        .. versionchanged:: 1.4
            Renamed from ``dim`` to ``n_dim``.

    alpha : float, default=0.95
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity. The value should be in the range [0, 1].

    norm_diag : bool, default=False
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1.

    smallest_coef : float, default=0.1
        The value of the smallest coefficient between 0 and 1.

    largest_coef : float, default=0.9
        The value of the largest coefficient between 0 and 1.

    sparse_format : str, default=None
        String representing the output sparse format, such as 'csc', 'csr', etc.
        If ``None``, return a dense numpy ndarray.

        .. versionadded:: 1.4

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    dim : int, default=1
        The size of the random matrix to generate.

        .. deprecated:: 1.4
            `dim` is deprecated and will be removed in 1.6.

    Returns
    -------
    prec : ndarray or sparse matrix of shape (dim, dim)
        The generated matrix. If ``sparse_format=None``, this would be an ndarray.
        Otherwise, this will be a sparse matrix of the specified format.

    See Also
    --------
    make_spd_matrix : Generate a random symmetric, positive-definite matrix.

    Notes
    -----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    Examples
    --------
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42)
    array([[1., 0., 0., 0.],
           [0., 1., 0., 0.],
           [0., 0., 1., 0.],
           [0., 0., 0., 1.]])
    """
    random_state = check_random_state(random_state)

    # TODO(1.6): remove in 1.6
    # Also make sure to change `n_dim` default back to 1 and deprecate None
    if n_dim is not None and dim != "deprecated":
        raise ValueError(
            "`dim` and `n_dim` cannot be both specified. Please use `n_dim` only "
            "as `dim` is deprecated in v1.4 and will be removed in v1.6."
        )

    if dim != "deprecated":
        warnings.warn(
            (
                "dim was deprecated in version 1.4 and will be removed in 1.6. "
                "Please use ``n_dim`` instead."
            ),
            FutureWarning,
        )
        _n_dim = dim
    elif n_dim is None:
        _n_dim = 1
    else:
        _n_dim = n_dim

    chol = -sp.eye(_n_dim)
    aux = sp.random(
        m=_n_dim,
        n=_n_dim,
        density=1 - alpha,
        data_rvs=lambda x: random_state.uniform(
            low=smallest_coef, high=largest_coef, size=x
        ),
        random_state=random_state,
    )
    # We need to avoid "coo" format because it does not support slicing
    aux = sp.tril(aux, k=-1, format="csc")

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(_n_dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    prec = chol.T @ chol

    if norm_diag:
        # Rescale rows and columns by the inverse square root of the diagonal
        d = sp.diags(1.0 / np.sqrt(prec.diagonal()))
        prec = d @ prec @ d

    if sparse_format is None:
        return prec.toarray()
    else:
        return prec.asformat(sparse_format)

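# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): lowering `alpha` makes the Cholesky factor denser;
# the result must remain symmetric positive definite either way. The values
# below are arbitrary demonstration choices.
import numpy as np

from sklearn.datasets import make_sparse_spd_matrix

P = make_sparse_spd_matrix(n_dim=10, alpha=0.5, random_state=0)
assert np.allclose(P, P.T)
assert np.all(np.linalg.eigvalsh(P) > 0)  # SPD by construction
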
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
        "hole": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None, hole=False):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the Swiss Roll.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    hole : bool, default=False
        If True, generates the swiss roll dataset with a hole.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", 2nd edition,
           Chapter 6, 2014.
           https://homepages.ecs.vuw.ac.nz/~marslast/Code/Ch6/lle.py
    """
    generator = check_random_state(random_state)

    if not hole:
        t = 1.5 * np.pi * (1 + 2 * generator.uniform(size=n_samples))
        y = 21 * generator.uniform(size=n_samples)
    else:
        corners = np.array(
            [[np.pi * (1.5 + i), j * 7] for i in range(3) for j in range(3)]
        )
        corners = np.delete(corners, 4, axis=0)
        corner_index = generator.choice(8, n_samples)
        parameters = generator.uniform(size=(2, n_samples)) * np.array([[np.pi], [7]])
        t, y = corners[corner_index].T + parameters

    x = t * np.cos(t)
    z = t * np.sin(t)

    X = np.vstack((x, y, z))
    X += noise * generator.standard_normal(size=(3, n_samples))
    X = X.T
    t = np.squeeze(t)

    return X, t

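# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): t parametrizes the roll (x = t*cos(t), z = t*sin(t)),
# so the radius sqrt(x**2 + z**2) of each noiseless point equals its t value.
import numpy as np

from sklearn.datasets import make_swiss_roll

X_demo, t_demo = make_swiss_roll(n_samples=50, random_state=0)
radius = np.sqrt(X_demo[:, 0] ** 2 + X_demo[:, 2] ** 2)
assert np.allclose(radius, t_demo)  # holds only with the default noise=0.0
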
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_s_curve(n_samples=100, *, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the S curve.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    generator = check_random_state(random_state)

    t = 3 * np.pi * (generator.uniform(size=(1, n_samples)) - 0.5)
    X = np.empty(shape=(n_samples, 3), dtype=np.float64)
    X[:, 0] = np.sin(t)
    X[:, 1] = 2.0 * generator.uniform(size=n_samples)
    X[:, 2] = np.sign(t) * (np.cos(t) - 1)
    X += noise * generator.standard_normal(size=(3, n_samples)).T
    t = np.squeeze(t)

    return X, t

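# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): noiseless S-curve points satisfy x = sin(t), so the
# first coordinate stays within the unit band.
import numpy as np

from sklearn.datasets import make_s_curve

X_demo, t_demo = make_s_curve(n_samples=50, random_state=0)
assert np.allclose(X_demo[:, 0], np.sin(t_demo))
assert np.all(np.abs(X_demo[:, 0]) <= 1.0)
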
@validate_params(
    {
        "mean": ["array-like", None],
        "cov": [Interval(Real, 0, None, closed="left")],
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_gaussian_quantiles(
    *,
    mean=None,
    cov=1.0,
    n_samples=100,
    n_features=2,
    n_classes=3,
    shuffle=True,
    random_state=None,
):
    r"""Generate isotropic Gaussian and label samples by quantile.

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array-like of shape (n_features,), default=None
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, default=1.0
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, default=100
        The total number of points equally divided among classes.

    n_features : int, default=2
        The number of features for each sample.

    n_classes : int, default=3
        The number of classes.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al. [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.

    Examples
    --------
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(random_state=42)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [2, 0, 1, 0, 2]
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    if mean is None:
        mean = np.zeros(n_features)
    else:
        mean = np.array(mean)

    # Build multivariate normal distribution
    X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,))

    # Sort by distance from the mean
    idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[idx, :]

    # Label by quantile
    step = n_samples // n_classes

    y = np.hstack(
        [
            np.repeat(np.arange(n_classes), step),
            np.repeat(n_classes - 1, n_samples - step * n_classes),
        ]
    )

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y

def _shuffle(data, random_state=None):
    generator = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_idx = generator.permutation(n_rows)
    col_idx = generator.permutation(n_cols)
    result = data[row_idx][:, col_idx]
    return result, row_idx, col_idx

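# Illustrative sketch (editor's addition, not part of the upstream
# scikit-learn source): _shuffle only permutes rows and columns, so the
# multiset of values is preserved and inverting the returned index arrays
# recovers the original data.
import numpy as np

data = np.arange(12).reshape(3, 4)
shuffled, row_idx, col_idx = _shuffle(data, random_state=0)
assert sorted(shuffled.ravel()) == sorted(data.ravel())
assert np.array_equal(shuffled[np.argsort(row_idx)][:, np.argsort(col_idx)], data)
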
@validate_params(
    {
        "shape": [tuple],
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "minval": [Interval(Real, None, None, closed="neither")],
        "maxval": [Interval(Real, None, None, closed="neither")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_biclusters(
    shape,
    n_clusters,
    *,
    noise=0.0,
    minval=10,
    maxval=100,
    shuffle=True,
    random_state=None,
):
    """Generate a constant block diagonal structure array for biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : tuple of shape (n_rows, n_cols)
        The shape of the result.

    n_clusters : int
        The number of biclusters.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    minval : float, default=10
        Minimum value of a bicluster.

    maxval : float, default=100
        Maximum value of a bicluster.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape `shape`
        The generated array.

    rows : ndarray of shape (n_clusters, X.shape[0])
        The indicators for cluster membership of each row.

    cols : ndarray of shape (n_clusters, X.shape[1])
        The indicators for cluster membership of each column.

    See Also
    --------
    make_checkerboard: Generate an array with block checkerboard structure for
        biclustering.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters))
    col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters))

    row_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes)]
    )
    col_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes)]
    )

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols

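# Illustrative usage sketch (editor's addition, not part of the upstream
# scikit-learn source): with shuffle=False and no noise the block-diagonal
# structure is visible directly, so entries outside the diagonal blocks stay
# exactly zero. The shape and cluster count are arbitrary choices.
import numpy as np

from sklearn.datasets import make_biclusters

data, rows, cols = make_biclusters(
    shape=(12, 8), n_clusters=3, shuffle=False, random_state=0
)
outside = ~np.any([np.outer(r, c) for r, c in zip(rows, cols)], axis=0)
assert np.all(data[outside] == 0)
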
@validate_params(
|
2160 |
+
{
|
2161 |
+
"shape": [tuple],
|
2162 |
+
"n_clusters": [Interval(Integral, 1, None, closed="left"), "array-like"],
|
2163 |
+
"noise": [Interval(Real, 0, None, closed="left")],
|
2164 |
+
"minval": [Interval(Real, None, None, closed="neither")],
|
2165 |
+
"maxval": [Interval(Real, None, None, closed="neither")],
|
2166 |
+
"shuffle": ["boolean"],
|
2167 |
+
"random_state": ["random_state"],
|
2168 |
+
},
|
2169 |
+
prefer_skip_nested_validation=True,
|
2170 |
+
)
|
2171 |
+
def make_checkerboard(
|
2172 |
+
shape,
|
2173 |
+
n_clusters,
|
2174 |
+
*,
|
2175 |
+
noise=0.0,
|
2176 |
+
minval=10,
|
2177 |
+
maxval=100,
|
2178 |
+
shuffle=True,
|
2179 |
+
random_state=None,
|
2180 |
+
):
|
2181 |
+
"""Generate an array with block checkerboard structure for biclustering.
|
2182 |
+
|
2183 |
+
Read more in the :ref:`User Guide <sample_generators>`.
|
2184 |
+
|
2185 |
+
Parameters
|
2186 |
+
----------
|
2187 |
+
shape : tuple of shape (n_rows, n_cols)
|
2188 |
+
The shape of the result.
|
2189 |
+
|
2190 |
+
n_clusters : int or array-like or shape (n_row_clusters, n_column_clusters)
|
2191 |
+
The number of row and column clusters.
|
2192 |
+
|
2193 |
+
noise : float, default=0.0
|
2194 |
+
The standard deviation of the gaussian noise.
|
2195 |
+
|
2196 |
+
minval : float, default=10
|
2197 |
+
Minimum value of a bicluster.
|
2198 |
+
|
2199 |
+
maxval : float, default=100
|
2200 |
+
Maximum value of a bicluster.
|
2201 |
+
|
2202 |
+
shuffle : bool, default=True
|
2203 |
+
Shuffle the samples.
|
2204 |
+
|
2205 |
+
random_state : int, RandomState instance or None, default=None
|
2206 |
+
Determines random number generation for dataset creation. Pass an int
|
2207 |
+
for reproducible output across multiple function calls.
|
2208 |
+
See :term:`Glossary <random_state>`.
|
2209 |
+
|
2210 |
+
Returns
|
2211 |
+
-------
|
2212 |
+
X : ndarray of shape `shape`
|
2213 |
+
The generated array.
|
2214 |
+
|
2215 |
+
rows : ndarray of shape (n_clusters, X.shape[0])
|
2216 |
+
The indicators for cluster membership of each row.
|
2217 |
+
|
2218 |
+
cols : ndarray of shape (n_clusters, X.shape[1])
|
2219 |
+
The indicators for cluster membership of each column.
|
2220 |
+
|
2221 |
+
See Also
|
2222 |
+
--------
|
2223 |
+
make_biclusters : Generate an array with constant block diagonal structure
|
2224 |
+
for biclustering.
|
2225 |
+
|
2226 |
+
References
|
2227 |
+
----------
|
2228 |
+
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
|
2229 |
+
Spectral biclustering of microarray data: coclustering genes
|
2230 |
+
and conditions. Genome research, 13(4), 703-716.
|
2231 |
+
"""
|
2232 |
+
generator = check_random_state(random_state)
|
2233 |
+
|
2234 |
+
if hasattr(n_clusters, "__len__"):
|
2235 |
+
n_row_clusters, n_col_clusters = n_clusters
|
2236 |
+
else:
|
2237 |
+
n_row_clusters = n_col_clusters = n_clusters
|
2238 |
+
|
2239 |
+
# row and column clusters of approximately equal sizes
|
2240 |
+
n_rows, n_cols = shape
|
2241 |
+
row_sizes = generator.multinomial(
|
2242 |
+
n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)
|
2243 |
+
)
|
2244 |
+
col_sizes = generator.multinomial(
|
2245 |
+
n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)
|
2246 |
+
)
|
2247 |
+
|
2248 |
+
row_labels = np.hstack(
|
2249 |
+
[np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes)]
|
2250 |
+
)
|
2251 |
+
col_labels = np.hstack(
|
2252 |
+
[np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes)]
|
2253 |
+
)
|
2254 |
+
|
2255 |
+
result = np.zeros(shape, dtype=np.float64)
|
2256 |
+
for i in range(n_row_clusters):
|
2257 |
+
for j in range(n_col_clusters):
|
2258 |
+
selector = np.outer(row_labels == i, col_labels == j)
|
2259 |
+
result[selector] += generator.uniform(minval, maxval)
|
2260 |
+
|
2261 |
+
if noise > 0:
|
2262 |
+
result += generator.normal(scale=noise, size=result.shape)
|
2263 |
+
|
2264 |
+
if shuffle:
|
2265 |
+
result, row_idx, col_idx = _shuffle(result, random_state)
|
2266 |
+
row_labels = row_labels[row_idx]
|
2267 |
+
col_labels = col_labels[col_idx]
|
2268 |
+
|
2269 |
+
rows = np.vstack(
|
2270 |
+
[
|
2271 |
+
row_labels == label
|
2272 |
+
for label in range(n_row_clusters)
|
2273 |
+
for _ in range(n_col_clusters)
|
2274 |
+
]
|
2275 |
+
)
|
2276 |
+
cols = np.vstack(
|
2277 |
+
[
|
2278 |
+
col_labels == label
|
2279 |
+
for _ in range(n_row_clusters)
|
2280 |
+
for label in range(n_col_clusters)
|
2281 |
+
]
|
2282 |
+
)
|
2283 |
+
|
2284 |
+
return result, rows, cols
|
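
A minimal usage sketch for the make_checkerboard generator above (not part of the uploaded file; the shape, cluster counts, noise level, and seed below are arbitrary choices):

# Sketch: a 30x30 array with 3x2 = 6 checkerboard biclusters.
from sklearn.datasets import make_checkerboard

X, rows, cols = make_checkerboard(
    shape=(30, 30), n_clusters=(3, 2), noise=0.5, shuffle=True, random_state=0
)
# One boolean indicator row per (row_cluster, column_cluster) pair.
assert rows.shape == (6, 30) and cols.shape == (6, 30)
# Extract the submatrix belonging to the first bicluster.
submatrix = X[rows[0]][:, cols[0]]
print(submatrix.shape)  # roughly (10, 15); sizes are drawn from a multinomial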
venv/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py
ADDED
@@ -0,0 +1,273 @@
"""
=============================
Species distribution dataset
=============================

This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).

The two species are:

 - `"Bradypus variegatus"
   <http://www.iucnredlist.org/details/3038/0>`_ ,
   the Brown-throated Sloth.

 - `"Microryzomys minutus"
   <http://www.iucnredlist.org/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.

References
----------

`"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips,
R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006.

Notes
-----

For an example of using this dataset, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
"""

# Authors: Peter Prettenhofer <[email protected]>
#          Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause

import logging
from io import BytesIO
from os import PathLike, makedirs, remove
from os.path import exists

import joblib
import numpy as np

from ..utils import Bunch
from ..utils._param_validation import validate_params
from . import get_data_home
from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath

# The original data can be found at:
# https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip
SAMPLES = RemoteFileMetadata(
    filename="samples.zip",
    url="https://ndownloader.figshare.com/files/5976075",
    checksum="abb07ad284ac50d9e6d20f1c4211e0fd3c098f7f85955e89d321ee8efe37ac28",
)

# The original data can be found at:
# https://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip
COVERAGES = RemoteFileMetadata(
    filename="coverages.zip",
    url="https://ndownloader.figshare.com/files/5976078",
    checksum="4d862674d72e79d6cee77e63b98651ec7926043ba7d39dcb31329cf3f6073807",
)

DATA_ARCHIVE_NAME = "species_coverage.pkz"


logger = logging.getLogger(__name__)


def _load_coverage(F, header_length=6, dtype=np.int16):
    """Load a coverage file from an open file object.

    This will return a numpy array of the given dtype
    """
    header = [F.readline() for _ in range(header_length)]
    make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
    header = dict([make_tuple(line) for line in header])

    M = np.loadtxt(F, dtype=dtype)
    nodata = int(header[b"NODATA_value"])
    if nodata != -9999:
        M[nodata] = -9999
    return M


def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data
    """
    names = F.readline().decode("ascii").strip().split(",")

    rec = np.loadtxt(F, skiprows=0, delimiter=",", dtype="S22,f4,f4")
    rec.dtype.names = names
    return rec


def construct_grids(batch):
    """Construct the map grid from the batch object

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages
    """
    # x,y coordinates for corner cells
    xmin = batch.x_left_lower_corner + batch.grid_size
    xmax = xmin + (batch.Nx * batch.grid_size)
    ymin = batch.y_left_lower_corner + batch.grid_size
    ymax = ymin + (batch.Ny * batch.grid_size)

    # x coordinates of the grid cells
    xgrid = np.arange(xmin, xmax, batch.grid_size)
    # y coordinates of the grid cells
    ygrid = np.arange(ymin, ymax, batch.grid_size)

    return (xgrid, ygrid)


@validate_params(
    {"data_home": [str, PathLike, None], "download_if_missing": ["boolean"]},
    prefer_skip_nested_validation=True,
)
def fetch_species_distributions(*, data_home=None, download_if_missing=True):
    """Loader for species distribution dataset from Phillips et al. (2006).

    Read more in the :ref:`User Guide <species_distribution_dataset>`.

    Parameters
    ----------
    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        coverages : array, shape = [14, 1592, 1212]
            These represent the 14 features measured
            at each point of the map grid.
            The latitude/longitude values for the grid are discussed below.
            Missing data is represented by the value -9999.
        train : record array, shape = (1624,)
            The training points for the data. Each point has three fields:

            - train['species'] is the species name
            - train['dd long'] is the longitude, in degrees
            - train['dd lat'] is the latitude, in degrees
        test : record array, shape = (620,)
            The test points for the data. Same format as the training data.
        Nx, Ny : integers
            The number of longitudes (x) and latitudes (y) in the grid
        x_left_lower_corner, y_left_lower_corner : floats
            The (x,y) position of the lower-left corner, in degrees
        grid_size : float
            The spacing between points of the grid, in degrees

    Notes
    -----

    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).

    The two species are:

    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/details/3038/0>`_ ,
      the Brown-throated Sloth.

    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.

    - For an example of using this dataset with scikit-learn, see
      :ref:`examples/applications/plot_species_distribution_modeling.py
      <sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.

    References
    ----------

    * `"Maximum entropy modeling of species geographic distributions"
      <http://rob.schapire.net/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    Examples
    --------
    >>> from sklearn.datasets import fetch_species_distributions
    >>> species = fetch_species_distributions()
    >>> species.train[:5]
    array([(b'microryzomys_minutus', -64.7   , -17.85  ),
           (b'microryzomys_minutus', -67.8333, -16.3333),
           (b'microryzomys_minutus', -67.8833, -16.3   ),
           (b'microryzomys_minutus', -67.8   , -16.2667),
           (b'microryzomys_minutus', -67.9833, -15.9   )],
          dtype=[('species', 'S22'), ('dd long', '<f4'), ('dd lat', '<f4')])
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)

    # Define parameters for the data files. These should not be changed
    # unless the data model changes. They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(
        x_left_lower_corner=-94.8,
        Nx=1212,
        y_left_lower_corner=-56.05,
        Ny=1592,
        grid_size=0.05,
    )
    dtype = np.int16

    archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)

    if not exists(archive_path):
        if not download_if_missing:
            raise OSError("Data not found and `download_if_missing` is False")
        logger.info("Downloading species data from %s to %s" % (SAMPLES.url, data_home))
        samples_path = _fetch_remote(SAMPLES, dirname=data_home)
        with np.load(samples_path) as X:  # samples.zip is a valid npz
            for f in X.files:
                fhandle = BytesIO(X[f])
                if "train" in f:
                    train = _load_csv(fhandle)
                if "test" in f:
                    test = _load_csv(fhandle)
        remove(samples_path)

        logger.info(
            "Downloading coverage data from %s to %s" % (COVERAGES.url, data_home)
        )
        coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
        with np.load(coverages_path) as X:  # coverages.zip is a valid npz
            coverages = []
            for f in X.files:
                fhandle = BytesIO(X[f])
                logger.debug(" - converting {}".format(f))
                coverages.append(_load_coverage(fhandle))
            coverages = np.asarray(coverages, dtype=dtype)
        remove(coverages_path)

        bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
        joblib.dump(bunch, archive_path, compress=9)
    else:
        bunch = joblib.load(archive_path)

    return bunch
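
A usage sketch pairing fetch_species_distributions with the construct_grids helper defined in the file above (not part of the uploaded file; note that the first call downloads the data archives):

# Sketch: load the dataset and rebuild the lat/lon grid of the coverage maps.
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets._species_distributions import construct_grids

data = fetch_species_distributions()
xgrid, ygrid = construct_grids(data)
# coverages[i] is the i-th environmental feature on the (Ny, Nx) grid;
# -9999 marks missing cells.
print(data.coverages.shape)  # (14, 1592, 1212)
print(len(xgrid), len(ygrid))  # one longitude per column, one latitude per row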
venv/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_fast.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (590 kB).
venv/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_io.py
ADDED
@@ -0,0 +1,584 @@
"""This module implements a loader and dumper for the svmlight format

This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse datasets.

The first element of each line can be used to store a target variable
to predict.

This format is used as the default format for both svmlight and the
libsvm command line programs.
"""

# Authors: Mathieu Blondel <[email protected]>
#          Lars Buitinck
#          Olivier Grisel <[email protected]>
# License: BSD 3 clause

import os.path
from contextlib import closing
from numbers import Integral

import numpy as np
import scipy.sparse as sp

from .. import __version__
from ..utils import IS_PYPY, check_array
from ..utils._param_validation import HasMethods, Interval, StrOptions, validate_params

if not IS_PYPY:
    from ._svmlight_format_fast import (
        _dump_svmlight_file,
        _load_svmlight_file,
    )
else:

    def _load_svmlight_file(*args, **kwargs):
        raise NotImplementedError(
            "load_svmlight_file is currently not "
            "compatible with PyPy (see "
            "https://github.com/scikit-learn/scikit-learn/issues/11543 "
            "for the status updates)."
        )


@validate_params(
    {
        "f": [
            str,
            Interval(Integral, 0, None, closed="left"),
            os.PathLike,
            HasMethods("read"),
        ],
        "n_features": [Interval(Integral, 1, None, closed="left"), None],
        "dtype": "no_validation",  # delegate validation to numpy
        "multilabel": ["boolean"],
        "zero_based": ["boolean", StrOptions({"auto"})],
        "query_id": ["boolean"],
        "offset": [Interval(Integral, 0, None, closed="left")],
        "length": [Integral],
    },
    prefer_skip_nested_validation=True,
)
def load_svmlight_file(
    f,
    *,
    n_features=None,
    dtype=np.float64,
    multilabel=False,
    zero_based="auto",
    query_id=False,
    offset=0,
    length=-1,
):
    """Load datasets in the svmlight / libsvm format into sparse CSR matrix.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse datasets.

    The first element of each line can be used to store a target variable
    to predict.

    This format is used as the default format for both svmlight and the
    libsvm command line programs.

    Parsing a text based source can be expensive. When repeatedly
    working on the same dataset, it is recommended to wrap this
    loader with joblib.Memory.cache to store a memmapped backup of the
    CSR results of the first call and benefit from the near instantaneous
    loading of memmapped structures for the subsequent calls.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    This implementation is written in Cython and is reasonably fast.
    However, a faster API-compatible loader is also available at:

      https://github.com/mblondel/svmlight-loader

    Parameters
    ----------
    f : str, path-like, file-like or int
        (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. A file-like or file descriptor will not be closed
        by this function. A file-like object must be opened in binary mode.

        .. versionchanged:: 1.2
           Path-like objects are now accepted.

    n_features : int, default=None
        The number of features to use. If None, it will be inferred. This
        argument is useful to load several files that are subsets of a
        bigger sliced dataset: each subset might not have examples of
        every feature, hence the inferred shape might vary from one
        slice to another.
        n_features is only required if ``offset`` or ``length`` are passed a
        non-default value.

    dtype : numpy data type, default=np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    multilabel : bool, default=False
        Samples may have several labels each (see
        https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

    zero_based : bool or "auto", default="auto"
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe when no ``offset`` or ``length`` is passed.
        If ``offset`` or ``length`` are passed, the "auto" mode falls back
        to ``zero_based=True`` to avoid having the heuristic check yield
        inconsistent results on different segments of the file.

    query_id : bool, default=False
        If True, will return the query_id array for each file.

    offset : int, default=0
        Ignore the offset first bytes by seeking forward, then
        discarding the following bytes up until the next new line
        character.

    length : int, default=-1
        If strictly positive, stop reading any new line of data once the
        position in the file has reached the (offset + length) bytes threshold.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)
        The data matrix.

    y : ndarray of shape (n_samples,), or a list of tuples of length n_samples
        The target. It is a list of tuples when ``multilabel=True``, else a
        ndarray.

    query_id : array of shape (n_samples,)
        The query_id for each sample. Only returned when query_id is set to
        True.

    See Also
    --------
    load_svmlight_files : Similar function for loading multiple files in this
        format, enforcing the same number of features/columns on all of them.

    Examples
    --------
    To use joblib.Memory to cache the svmlight file::

        from joblib import Memory
        from .datasets import load_svmlight_file
        mem = Memory("./mycache")

        @mem.cache
        def get_data():
            data = load_svmlight_file("mysvmlightfile")
            return data[0], data[1]

        X, y = get_data()
    """
    return tuple(
        load_svmlight_files(
            [f],
            n_features=n_features,
            dtype=dtype,
            multilabel=multilabel,
            zero_based=zero_based,
            query_id=query_id,
            offset=offset,
            length=length,
        )
    )


def _gen_open(f):
    if isinstance(f, int):  # file descriptor
        return open(f, "rb", closefd=False)
    elif isinstance(f, os.PathLike):
        f = os.fspath(f)
    elif not isinstance(f, str):
        raise TypeError("expected {str, int, path-like, file-like}, got %s" % type(f))

    _, ext = os.path.splitext(f)
    if ext == ".gz":
        import gzip

        return gzip.open(f, "rb")
    elif ext == ".bz2":
        from bz2 import BZ2File

        return BZ2File(f, "rb")
    else:
        return open(f, "rb")


def _open_and_load(f, dtype, multilabel, zero_based, query_id, offset=0, length=-1):
    if hasattr(f, "read"):
        actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file(
            f, dtype, multilabel, zero_based, query_id, offset, length
        )
    else:
        with closing(_gen_open(f)) as f:
            actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file(
                f, dtype, multilabel, zero_based, query_id, offset, length
            )

    # convert from array.array, give data the right dtype
    if not multilabel:
        labels = np.frombuffer(labels, np.float64)
    data = np.frombuffer(data, actual_dtype)
    indices = np.frombuffer(ind, np.longlong)
    indptr = np.frombuffer(indptr, dtype=np.longlong)  # never empty
    query = np.frombuffer(query, np.int64)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query


@validate_params(
    {
        "files": [
            "array-like",
            str,
            os.PathLike,
            HasMethods("read"),
            Interval(Integral, 0, None, closed="left"),
        ],
        "n_features": [Interval(Integral, 1, None, closed="left"), None],
        "dtype": "no_validation",  # delegate validation to numpy
        "multilabel": ["boolean"],
        "zero_based": ["boolean", StrOptions({"auto"})],
        "query_id": ["boolean"],
        "offset": [Interval(Integral, 0, None, closed="left")],
        "length": [Integral],
    },
    prefer_skip_nested_validation=True,
)
def load_svmlight_files(
    files,
    *,
    n_features=None,
    dtype=np.float64,
    multilabel=False,
    zero_based="auto",
    query_id=False,
    offset=0,
    length=-1,
):
    """Load dataset from multiple files in SVMlight format.

    This function is equivalent to mapping load_svmlight_file over a list of
    files, except that the results are concatenated into a single, flat list
    and the sample vectors are constrained to all have the same number of
    features.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : array-like, dtype=str, path-like, file-like or int
        (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. File-likes and file descriptors will not be
        closed by this function. File-like objects must be opened in binary
        mode.

        .. versionchanged:: 1.2
           Path-like objects are now accepted.

    n_features : int, default=None
        The number of features to use. If None, it will be inferred from the
        maximum column index occurring in any of the files.

        This can be set to a higher value than the actual number of features
        in any of the input files, but setting it to a lower value will cause
        an exception to be raised.

    dtype : numpy data type, default=np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    multilabel : bool, default=False
        Samples may have several labels each (see
        https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

    zero_based : bool or "auto", default="auto"
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe when no offset or length is passed.
        If offset or length are passed, the "auto" mode falls back
        to zero_based=True to avoid having the heuristic check yield
        inconsistent results on different segments of the file.

    query_id : bool, default=False
        If True, will return the query_id array for each file.

    offset : int, default=0
        Ignore the offset first bytes by seeking forward, then
        discarding the following bytes up until the next new line
        character.

    length : int, default=-1
        If strictly positive, stop reading any new line of data once the
        position in the file has reached the (offset + length) bytes threshold.

    Returns
    -------
    [X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays
        Each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
        If query_id is set to True, this will return instead (Xi, yi, qi)
        triplets.

    See Also
    --------
    load_svmlight_file: Similar function for loading a single file in this
        format.

    Notes
    -----
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may not
    be the case if you load the files individually with load_svmlight_file.
    """
    if (offset != 0 or length > 0) and zero_based == "auto":
        # disable heuristic search to avoid getting inconsistent results on
        # different segments of the file
        zero_based = True

    if (offset != 0 or length > 0) and n_features is None:
        raise ValueError("n_features is required when offset or length is specified.")

    r = [
        _open_and_load(
            f,
            dtype,
            multilabel,
            bool(zero_based),
            bool(query_id),
            offset=offset,
            length=length,
        )
        for f in files
    ]

    if (
        zero_based is False
        or zero_based == "auto"
        and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r)
    ):
        for _, indices, _, _, _ in r:
            indices -= 1

    n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1

    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError(
            "n_features was set to {}, but input file contains {} features".format(
                n_features, n_f
            )
        )

    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result


def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    if comment:
        f.write(
            (
                "# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__
            ).encode()
        )
        f.write(
            ("# Column indices are %s-based\n" % ["zero", "one"][one_based]).encode()
        )

        f.write(b"#\n")
        f.writelines(b"# %s\n" % line for line in comment.splitlines())

    X_is_sp = sp.issparse(X)
    y_is_sp = sp.issparse(y)
    if not multilabel and not y_is_sp:
        y = y[:, np.newaxis]
    _dump_svmlight_file(
        X,
        y,
        f,
        multilabel,
        one_based,
        query_id,
        X_is_sp,
        y_is_sp,
    )


@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", "sparse matrix"],
        "f": [str, HasMethods(["write"])],
        "zero_based": ["boolean"],
        "comment": [str, bytes, None],
        "query_id": ["array-like", None],
        "multilabel": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def dump_svmlight_file(
    X,
    y,
    f,
    *,
    zero_based=True,
    comment=None,
    query_id=None,
    multilabel=False,
):
    """Dump the dataset in svmlight / libsvm file format.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse datasets.

    The first element of each line can be used to store a target variable
    to predict.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vectors, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels)
        Target values. Class labels must be an
        integer or float, or array-like objects of integer or float for
        multilabel classifications.

    f : str or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.

    zero_based : bool, default=True
        Whether column indices should be written zero-based (True) or one-based
        (False).

    comment : str or bytes, default=None
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.

    query_id : array-like of shape (n_samples,), default=None
        Array containing pairwise preference constraints (qid in svmlight
        format).

    multilabel : bool, default=False
        Samples may have several labels each (see
        https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

        .. versionadded:: 0.17
           parameter `multilabel` to support multilabel datasets.

    Examples
    --------
    >>> from sklearn.datasets import dump_svmlight_file, make_classification
    >>> X, y = make_classification(random_state=0)
    >>> output_file = "my_dataset.svmlight"
    >>> dump_svmlight_file(X, y, output_file)  # doctest: +SKIP
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        if isinstance(comment, bytes):
            comment.decode("ascii")  # just for the exception
        else:
            comment = comment.encode("utf-8")
        if b"\0" in comment:
            raise ValueError("comment string contains NUL byte")

    yval = check_array(y, accept_sparse="csr", ensure_2d=False)
    if sp.issparse(yval):
        if yval.shape[1] != 1 and not multilabel:
            raise ValueError(
                "expected y of shape (n_samples, 1), got %r" % (yval.shape,)
            )
    else:
        if yval.ndim != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,))

    Xval = check_array(X, accept_sparse="csr")
    if Xval.shape[0] != yval.shape[0]:
        raise ValueError(
            "X.shape[0] and y.shape[0] should be the same, got %r and %r instead."
            % (Xval.shape[0], yval.shape[0])
        )

    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if yval is y and hasattr(yval, "sorted_indices"):
        y = yval.sorted_indices()
    else:
        y = yval
        if hasattr(y, "sort_indices"):
            y.sort_indices()

    if Xval is X and hasattr(Xval, "sorted_indices"):
        X = Xval.sorted_indices()
    else:
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is None:
        # NOTE: query_id is passed to Cython functions using a fused type on query_id.
        # Yet as of Cython>=3.0, memory views can't be None otherwise the runtime
        # would not know which concrete implementation to dispatch the Python call to.
        # TODO: simplify interfaces and implementations in _svmlight_format_fast.pyx.
        query_id = np.array([], dtype=np.int32)
    else:
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError(
                "expected query_id of shape (n_samples,), got %r" % (query_id.shape,)
            )

    one_based = not zero_based

    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
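
A round-trip sketch for the dump/load pair above (not part of the uploaded file; the file name is an arbitrary choice):

# Sketch: write a tiny dense dataset in svmlight format and read it back as CSR.
import numpy as np
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = np.array([[0.0, 2.0], [3.0, 0.0]])
y = np.array([0.0, 1.0])
dump_svmlight_file(X, y, "tiny.svmlight", zero_based=True)

# n_features is optional here; it becomes mandatory when reading file slices
# via offset/length, and useful when several files must align column-wise.
X2, y2 = load_svmlight_file("tiny.svmlight", n_features=2)
assert (X2.toarray() == X).all() and (y2 == y).all()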
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.31 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc
ADDED
Binary file (62.1 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc
ADDED
Binary file (13.5 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc
ADDED
Binary file (13.6 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc
ADDED
Binary file (18.4 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc
ADDED
Binary file (25.9 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc
ADDED
Binary file (21.2 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc
ADDED
Binary file (16.6 kB).
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py
ADDED
@@ -0,0 +1,983 @@
import itertools
import warnings
from functools import partial

import numpy as np
import pytest

import sklearn
from sklearn.base import clone
from sklearn.decomposition import (
    DictionaryLearning,
    MiniBatchDictionaryLearning,
    SparseCoder,
    dict_learning,
    dict_learning_online,
    sparse_encode,
)
from sklearn.decomposition._dict_learning import _update_dict
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils._testing import (
    TempMemmap,
    assert_allclose,
    assert_array_almost_equal,
    assert_array_equal,
    ignore_warnings,
)
from sklearn.utils.estimator_checks import (
    check_transformer_data_not_an_array,
    check_transformer_general,
    check_transformers_unfitted,
)
from sklearn.utils.parallel import Parallel

rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)


def test_sparse_encode_shapes_omp():
    rng = np.random.RandomState(0)
    algorithms = ["omp", "lasso_lars", "lasso_cd", "lars", "threshold"]
    for n_components, n_samples in itertools.product([1, 5], [1, 9]):
        X_ = rng.randn(n_samples, n_features)
        dictionary = rng.randn(n_components, n_features)
        for algorithm, n_jobs in itertools.product(algorithms, [1, 2]):
            code = sparse_encode(X_, dictionary, algorithm=algorithm, n_jobs=n_jobs)
            assert code.shape == (n_samples, n_components)


def test_dict_learning_shapes():
    n_components = 5
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert dico.components_.shape == (n_components, n_features)

    n_components = 1
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert dico.components_.shape == (n_components, n_features)
    assert dico.transform(X).shape == (X.shape[0], n_components)


def test_dict_learning_overcomplete():
    n_components = 12
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert dico.components_.shape == (n_components, n_features)


def test_max_iter():
    def ricker_function(resolution, center, width):
        """Discrete sub-sampled Ricker (Mexican hat) wavelet"""
        x = np.linspace(0, resolution - 1, resolution)
        x = (
            (2 / (np.sqrt(3 * width) * np.pi**0.25))
            * (1 - (x - center) ** 2 / width**2)
            * np.exp(-((x - center) ** 2) / (2 * width**2))
        )
        return x

    def ricker_matrix(width, resolution, n_components):
        """Dictionary of Ricker (Mexican hat) wavelets"""
        centers = np.linspace(0, resolution - 1, n_components)
        D = np.empty((n_components, resolution))
        for i, center in enumerate(centers):
            D[i] = ricker_function(resolution, center, width)
        D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis]
        return D

    transform_algorithm = "lasso_cd"
    resolution = 1024
    subsampling = 3  # subsampling factor
    n_components = resolution // subsampling

    # Compute a wavelet dictionary
    D_multi = np.r_[
        tuple(
            ricker_matrix(
                width=w, resolution=resolution, n_components=n_components // 5
            )
            for w in (10, 50, 100, 500, 1000)
        )
    ]

    X = np.linspace(0, resolution - 1, resolution)
    first_quarter = X < resolution / 4
    X[first_quarter] = 3.0
    X[np.logical_not(first_quarter)] = -1.0
    X = X.reshape(1, -1)

    # check that the underlying model fails to converge
    with pytest.warns(ConvergenceWarning):
        model = SparseCoder(
            D_multi, transform_algorithm=transform_algorithm, transform_max_iter=1
        )
        model.fit_transform(X)

    # check that the underlying model converges w/o warnings
    with warnings.catch_warnings():
        warnings.simplefilter("error", ConvergenceWarning)
        model = SparseCoder(
            D_multi, transform_algorithm=transform_algorithm, transform_max_iter=2000
        )
        model.fit_transform(X)


def test_dict_learning_lars_positive_parameter():
    n_components = 5
    alpha = 1
    err_msg = "Positive constraint not supported for 'lars' coding method."
    with pytest.raises(ValueError, match=err_msg):
        dict_learning(X, n_components, alpha=alpha, positive_code=True)


@pytest.mark.parametrize(
    "transform_algorithm",
    [
        "lasso_lars",
        "lasso_cd",
        "threshold",
    ],
)
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_positivity(transform_algorithm, positive_code, positive_dict):
    n_components = 5
    dico = DictionaryLearning(
        n_components,
        transform_algorithm=transform_algorithm,
        random_state=0,
        positive_code=positive_code,
        positive_dict=positive_dict,
        fit_algorithm="cd",
    ).fit(X)

    code = dico.transform(X)
    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()


@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_lars_dict_positivity(positive_dict):
    n_components = 5
    dico = DictionaryLearning(
        n_components,
        transform_algorithm="lars",
        random_state=0,
        positive_dict=positive_dict,
        fit_algorithm="cd",
    ).fit(X)

    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()


def test_dict_learning_lars_code_positivity():
    n_components = 5
    dico = DictionaryLearning(
        n_components,
        transform_algorithm="lars",
        random_state=0,
        positive_code=True,
        fit_algorithm="cd",
    ).fit(X)

    err_msg = "Positive constraint not supported for '{}' coding method."
    err_msg = err_msg.format("lars")
    with pytest.raises(ValueError, match=err_msg):
        dico.transform(X)


def test_dict_learning_reconstruction():
    n_components = 12
    dico = DictionaryLearning(
        n_components, transform_algorithm="omp", transform_alpha=0.001, random_state=0
    )
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)

    dico.set_params(transform_algorithm="lasso_lars")
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)

    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.


def test_dict_learning_reconstruction_parallel():
    # regression test that parallel reconstruction works with n_jobs>1
    n_components = 12
    dico = DictionaryLearning(
        n_components,
        transform_algorithm="omp",
        transform_alpha=0.001,
        random_state=0,
        n_jobs=4,
    )
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)

    dico.set_params(transform_algorithm="lasso_lars")
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)


def test_dict_learning_lassocd_readonly_data():
    n_components = 12
    with TempMemmap(X) as X_read_only:
        dico = DictionaryLearning(
            n_components,
            transform_algorithm="lasso_cd",
            transform_alpha=0.001,
            random_state=0,
            n_jobs=4,
        )
        with ignore_warnings(category=ConvergenceWarning):
            code = dico.fit(X_read_only).transform(X_read_only)
        assert_array_almost_equal(
            np.dot(code, dico.components_), X_read_only, decimal=2
        )


def test_dict_learning_nonzero_coefs():
    n_components = 4
    dico = DictionaryLearning(
        n_components,
        transform_algorithm="lars",
        transform_n_nonzero_coefs=3,
        random_state=0,
    )
    code = dico.fit(X).transform(X[np.newaxis, 1])
    assert len(np.flatnonzero(code)) == 3

    dico.set_params(transform_algorithm="omp")
    code = dico.transform(X[np.newaxis, 1])
    assert len(np.flatnonzero(code)) == 3


def test_dict_learning_split():
    n_components = 5
    dico = DictionaryLearning(
        n_components, transform_algorithm="threshold", random_state=0
    )
    code = dico.fit(X).transform(X)
    dico.split_sign = True
    split_code = dico.transform(X)

    assert_array_almost_equal(
        split_code[:, :n_components] - split_code[:, n_components:], code
    )


def test_dict_learning_online_shapes():
    rng = np.random.RandomState(0)
    n_components = 8

    code, dictionary = dict_learning_online(
        X,
        n_components=n_components,
        batch_size=4,
        max_iter=10,
        method="cd",
        random_state=rng,
        return_code=True,
    )
    assert code.shape == (n_samples, n_components)
    assert dictionary.shape == (n_components, n_features)
    assert np.dot(code, dictionary).shape == X.shape

    dictionary = dict_learning_online(
        X,
        n_components=n_components,
        batch_size=4,
        max_iter=10,
        method="cd",
        random_state=rng,
        return_code=False,
    )
    assert dictionary.shape == (n_components, n_features)


def test_dict_learning_online_lars_positive_parameter():
    err_msg = "Positive constraint not supported for 'lars' coding method."
    with pytest.raises(ValueError, match=err_msg):
        dict_learning_online(X, batch_size=4, max_iter=10, positive_code=True)


@pytest.mark.parametrize(
    "transform_algorithm",
    [
        "lasso_lars",
        "lasso_cd",
        "threshold",
    ],
)
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_positivity(
    transform_algorithm, positive_code, positive_dict
):
    n_components = 8
    dico = MiniBatchDictionaryLearning(
        n_components,
        batch_size=4,
        max_iter=10,
        transform_algorithm=transform_algorithm,
        random_state=0,
        positive_code=positive_code,
        positive_dict=positive_dict,
        fit_algorithm="cd",
    ).fit(X)

    code = dico.transform(X)
    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()


@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_lars(positive_dict):
    n_components = 8

    dico = MiniBatchDictionaryLearning(
        n_components,
        batch_size=4,
        max_iter=10,
        transform_algorithm="lars",
        random_state=0,
        positive_dict=positive_dict,
        fit_algorithm="cd",
    ).fit(X)

    if positive_dict:
        assert (dico.components_ >= 0).all()
    else:
        assert (dico.components_ < 0).any()


@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_online_positivity(positive_code, positive_dict):
    rng = np.random.RandomState(0)
    n_components = 8

    code, dictionary = dict_learning_online(
        X,
        n_components=n_components,
        batch_size=4,
        method="cd",
        alpha=1,
        random_state=rng,
        positive_dict=positive_dict,
        positive_code=positive_code,
    )
    if positive_dict:
        assert (dictionary >= 0).all()
    else:
        assert (dictionary < 0).any()
    if positive_code:
        assert (code >= 0).all()
    else:
        assert (code < 0).any()


def test_dict_learning_online_verbosity():
    # test verbosity for better coverage
    n_components = 5
    import sys
    from io import StringIO

    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()

        # convergence monitoring verbosity
        dico = MiniBatchDictionaryLearning(
            n_components, batch_size=4, max_iter=5, verbose=1, tol=0.1, random_state=0
        )
        dico.fit(X)
        dico = MiniBatchDictionaryLearning(
            n_components,
            batch_size=4,
            max_iter=5,
            verbose=1,
            max_no_improvement=2,
            random_state=0,
        )
        dico.fit(X)
        # higher verbosity level
        dico = MiniBatchDictionaryLearning(
            n_components, batch_size=4, max_iter=5, verbose=2, random_state=0
        )
        dico.fit(X)

        # function API verbosity
        dict_learning_online(
            X,
            n_components=n_components,
            batch_size=4,
            alpha=1,
            verbose=1,
            random_state=0,
        )
        dict_learning_online(
            X,
            n_components=n_components,
            batch_size=4,
            alpha=1,
            verbose=2,
            random_state=0,
        )
    finally:
        sys.stdout = old_stdout

    assert dico.components_.shape == (n_components, n_features)


def test_dict_learning_online_estimator_shapes():
    n_components = 5
    dico = MiniBatchDictionaryLearning(
        n_components, batch_size=4, max_iter=5, random_state=0
    )
    dico.fit(X)
|
455 |
+
assert dico.components_.shape == (n_components, n_features)
|
456 |
+
|
457 |
+
|
458 |
+
def test_dict_learning_online_overcomplete():
|
459 |
+
n_components = 12
|
460 |
+
dico = MiniBatchDictionaryLearning(
|
461 |
+
n_components, batch_size=4, max_iter=5, random_state=0
|
462 |
+
).fit(X)
|
463 |
+
assert dico.components_.shape == (n_components, n_features)
|
464 |
+
|
465 |
+
|
466 |
+
def test_dict_learning_online_initialization():
|
467 |
+
n_components = 12
|
468 |
+
rng = np.random.RandomState(0)
|
469 |
+
V = rng.randn(n_components, n_features)
|
470 |
+
dico = MiniBatchDictionaryLearning(
|
471 |
+
n_components, batch_size=4, max_iter=0, dict_init=V, random_state=0
|
472 |
+
).fit(X)
|
473 |
+
assert_array_equal(dico.components_, V)
|
474 |
+
|
475 |
+
|
476 |
+
def test_dict_learning_online_readonly_initialization():
|
477 |
+
n_components = 12
|
478 |
+
rng = np.random.RandomState(0)
|
479 |
+
V = rng.randn(n_components, n_features)
|
480 |
+
V.setflags(write=False)
|
481 |
+
MiniBatchDictionaryLearning(
|
482 |
+
n_components,
|
483 |
+
batch_size=4,
|
484 |
+
max_iter=1,
|
485 |
+
dict_init=V,
|
486 |
+
random_state=0,
|
487 |
+
shuffle=False,
|
488 |
+
).fit(X)
|
489 |
+
|
490 |
+
|
491 |
+
def test_dict_learning_online_partial_fit():
|
492 |
+
n_components = 12
|
493 |
+
rng = np.random.RandomState(0)
|
494 |
+
V = rng.randn(n_components, n_features) # random init
|
495 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
496 |
+
dict1 = MiniBatchDictionaryLearning(
|
497 |
+
n_components,
|
498 |
+
max_iter=10,
|
499 |
+
batch_size=1,
|
500 |
+
alpha=1,
|
501 |
+
shuffle=False,
|
502 |
+
dict_init=V,
|
503 |
+
max_no_improvement=None,
|
504 |
+
tol=0.0,
|
505 |
+
random_state=0,
|
506 |
+
).fit(X)
|
507 |
+
dict2 = MiniBatchDictionaryLearning(
|
508 |
+
n_components, alpha=1, dict_init=V, random_state=0
|
509 |
+
)
|
510 |
+
for i in range(10):
|
511 |
+
for sample in X:
|
512 |
+
dict2.partial_fit(sample[np.newaxis, :])
|
513 |
+
|
514 |
+
assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
|
515 |
+
assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2)
|
516 |
+
|
517 |
+
# partial_fit should ignore max_iter (#17433)
|
518 |
+
assert dict1.n_steps_ == dict2.n_steps_ == 100
|
519 |
+
|
520 |
+
|
521 |
+
def test_sparse_encode_shapes():
|
522 |
+
n_components = 12
|
523 |
+
rng = np.random.RandomState(0)
|
524 |
+
V = rng.randn(n_components, n_features) # random init
|
525 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
526 |
+
for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"):
|
527 |
+
code = sparse_encode(X, V, algorithm=algo)
|
528 |
+
assert code.shape == (n_samples, n_components)
|
529 |
+
|
530 |
+
|
531 |
+
@pytest.mark.parametrize("algo", ["lasso_lars", "lasso_cd", "threshold"])
|
532 |
+
@pytest.mark.parametrize("positive", [False, True])
|
533 |
+
def test_sparse_encode_positivity(algo, positive):
|
534 |
+
n_components = 12
|
535 |
+
rng = np.random.RandomState(0)
|
536 |
+
V = rng.randn(n_components, n_features) # random init
|
537 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
538 |
+
code = sparse_encode(X, V, algorithm=algo, positive=positive)
|
539 |
+
if positive:
|
540 |
+
assert (code >= 0).all()
|
541 |
+
else:
|
542 |
+
assert (code < 0).any()
|
543 |
+
|
544 |
+
|
545 |
+
@pytest.mark.parametrize("algo", ["lars", "omp"])
|
546 |
+
def test_sparse_encode_unavailable_positivity(algo):
|
547 |
+
n_components = 12
|
548 |
+
rng = np.random.RandomState(0)
|
549 |
+
V = rng.randn(n_components, n_features) # random init
|
550 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
551 |
+
err_msg = "Positive constraint not supported for '{}' coding method."
|
552 |
+
err_msg = err_msg.format(algo)
|
553 |
+
with pytest.raises(ValueError, match=err_msg):
|
554 |
+
sparse_encode(X, V, algorithm=algo, positive=True)
|
555 |
+
|
556 |
+
|
557 |
+
def test_sparse_encode_input():
|
558 |
+
n_components = 100
|
559 |
+
rng = np.random.RandomState(0)
|
560 |
+
V = rng.randn(n_components, n_features) # random init
|
561 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
562 |
+
Xf = check_array(X, order="F")
|
563 |
+
for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"):
|
564 |
+
a = sparse_encode(X, V, algorithm=algo)
|
565 |
+
b = sparse_encode(Xf, V, algorithm=algo)
|
566 |
+
assert_array_almost_equal(a, b)
|
567 |
+
|
568 |
+
|
569 |
+
def test_sparse_encode_error():
|
570 |
+
n_components = 12
|
571 |
+
rng = np.random.RandomState(0)
|
572 |
+
V = rng.randn(n_components, n_features) # random init
|
573 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
574 |
+
code = sparse_encode(X, V, alpha=0.001)
|
575 |
+
assert not np.all(code == 0)
|
576 |
+
assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
|
577 |
+
|
578 |
+
|
579 |
+
def test_sparse_encode_error_default_sparsity():
|
580 |
+
rng = np.random.RandomState(0)
|
581 |
+
X = rng.randn(100, 64)
|
582 |
+
D = rng.randn(2, 64)
|
583 |
+
code = ignore_warnings(sparse_encode)(X, D, algorithm="omp", n_nonzero_coefs=None)
|
584 |
+
assert code.shape == (100, 2)
|
585 |
+
|
586 |
+
|
587 |
+
def test_sparse_coder_estimator():
|
588 |
+
n_components = 12
|
589 |
+
rng = np.random.RandomState(0)
|
590 |
+
V = rng.randn(n_components, n_features) # random init
|
591 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
592 |
+
coder = SparseCoder(
|
593 |
+
dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001
|
594 |
+
).transform(X)
|
595 |
+
assert not np.all(coder == 0)
|
596 |
+
assert np.sqrt(np.sum((np.dot(coder, V) - X) ** 2)) < 0.1
|
597 |
+
|
598 |
+
|
599 |
+
def test_sparse_coder_estimator_clone():
|
600 |
+
n_components = 12
|
601 |
+
rng = np.random.RandomState(0)
|
602 |
+
V = rng.randn(n_components, n_features) # random init
|
603 |
+
V /= np.sum(V**2, axis=1)[:, np.newaxis]
|
604 |
+
coder = SparseCoder(
|
605 |
+
dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001
|
606 |
+
)
|
607 |
+
cloned = clone(coder)
|
608 |
+
assert id(cloned) != id(coder)
|
609 |
+
np.testing.assert_allclose(cloned.dictionary, coder.dictionary)
|
610 |
+
assert id(cloned.dictionary) != id(coder.dictionary)
|
611 |
+
assert cloned.n_components_ == coder.n_components_
|
612 |
+
assert cloned.n_features_in_ == coder.n_features_in_
|
613 |
+
data = np.random.rand(n_samples, n_features).astype(np.float32)
|
614 |
+
np.testing.assert_allclose(cloned.transform(data), coder.transform(data))
|
615 |
+
|
616 |
+
|
617 |
+
def test_sparse_coder_parallel_mmap():
|
618 |
+
# Non-regression test for:
|
619 |
+
# https://github.com/scikit-learn/scikit-learn/issues/5956
|
620 |
+
# Test that SparseCoder does not error by passing reading only
|
621 |
+
# arrays to child processes
|
622 |
+
|
623 |
+
rng = np.random.RandomState(777)
|
624 |
+
n_components, n_features = 40, 64
|
625 |
+
init_dict = rng.rand(n_components, n_features)
|
626 |
+
# Ensure that `data` is >2M. Joblib memory maps arrays
|
627 |
+
# if they are larger than 1MB. The 4 accounts for float32
|
628 |
+
# data type
|
629 |
+
n_samples = int(2e6) // (4 * n_features)
|
630 |
+
data = np.random.rand(n_samples, n_features).astype(np.float32)
|
631 |
+
|
632 |
+
sc = SparseCoder(init_dict, transform_algorithm="omp", n_jobs=2)
|
633 |
+
sc.fit_transform(data)
|
634 |
+
|
635 |
+
|
636 |
+
def test_sparse_coder_common_transformer():
|
637 |
+
rng = np.random.RandomState(777)
|
638 |
+
n_components, n_features = 40, 3
|
639 |
+
init_dict = rng.rand(n_components, n_features)
|
640 |
+
|
641 |
+
sc = SparseCoder(init_dict)
|
642 |
+
|
643 |
+
check_transformer_data_not_an_array(sc.__class__.__name__, sc)
|
644 |
+
check_transformer_general(sc.__class__.__name__, sc)
|
645 |
+
check_transformer_general_memmap = partial(
|
646 |
+
check_transformer_general, readonly_memmap=True
|
647 |
+
)
|
648 |
+
check_transformer_general_memmap(sc.__class__.__name__, sc)
|
649 |
+
check_transformers_unfitted(sc.__class__.__name__, sc)
|
650 |
+
|
651 |
+
|
652 |
+
def test_sparse_coder_n_features_in():
|
653 |
+
d = np.array([[1, 2, 3], [1, 2, 3]])
|
654 |
+
sc = SparseCoder(d)
|
655 |
+
assert sc.n_features_in_ == d.shape[1]
|
656 |
+
|
657 |
+
|
658 |
+
def test_update_dict():
|
659 |
+
# Check the dict update in batch mode vs online mode
|
660 |
+
# Non-regression test for #4866
|
661 |
+
rng = np.random.RandomState(0)
|
662 |
+
|
663 |
+
code = np.array([[0.5, -0.5], [0.1, 0.9]])
|
664 |
+
dictionary = np.array([[1.0, 0.0], [0.6, 0.8]])
|
665 |
+
|
666 |
+
X = np.dot(code, dictionary) + rng.randn(2, 2)
|
667 |
+
|
668 |
+
# full batch update
|
669 |
+
newd_batch = dictionary.copy()
|
670 |
+
_update_dict(newd_batch, X, code)
|
671 |
+
|
672 |
+
# online update
|
673 |
+
A = np.dot(code.T, code)
|
674 |
+
B = np.dot(X.T, code)
|
675 |
+
newd_online = dictionary.copy()
|
676 |
+
_update_dict(newd_online, X, code, A, B)
|
677 |
+
|
678 |
+
assert_allclose(newd_batch, newd_online)
|
679 |
+
|
680 |
+
|
681 |
+
@pytest.mark.parametrize(
|
682 |
+
"algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
|
683 |
+
)
|
684 |
+
@pytest.mark.parametrize("data_type", (np.float32, np.float64))
|
685 |
+
# Note: do not check integer input because `lasso_lars` and `lars` fail with
|
686 |
+
# `ValueError` in `_lars_path_solver`
|
687 |
+
def test_sparse_encode_dtype_match(data_type, algorithm):
|
688 |
+
n_components = 6
|
689 |
+
rng = np.random.RandomState(0)
|
690 |
+
dictionary = rng.randn(n_components, n_features)
|
691 |
+
code = sparse_encode(
|
692 |
+
X.astype(data_type), dictionary.astype(data_type), algorithm=algorithm
|
693 |
+
)
|
694 |
+
assert code.dtype == data_type
|
695 |
+
|
696 |
+
|
697 |
+
@pytest.mark.parametrize(
|
698 |
+
"algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
|
699 |
+
)
|
700 |
+
def test_sparse_encode_numerical_consistency(algorithm):
|
701 |
+
# verify numerical consistency among np.float32 and np.float64
|
702 |
+
rtol = 1e-4
|
703 |
+
n_components = 6
|
704 |
+
rng = np.random.RandomState(0)
|
705 |
+
dictionary = rng.randn(n_components, n_features)
|
706 |
+
code_32 = sparse_encode(
|
707 |
+
X.astype(np.float32), dictionary.astype(np.float32), algorithm=algorithm
|
708 |
+
)
|
709 |
+
code_64 = sparse_encode(
|
710 |
+
X.astype(np.float64), dictionary.astype(np.float64), algorithm=algorithm
|
711 |
+
)
|
712 |
+
assert_allclose(code_32, code_64, rtol=rtol)
|
713 |
+
|
714 |
+
|
715 |
+
@pytest.mark.parametrize(
|
716 |
+
"transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
|
717 |
+
)
|
718 |
+
@pytest.mark.parametrize("data_type", (np.float32, np.float64))
|
719 |
+
# Note: do not check integer input because `lasso_lars` and `lars` fail with
|
720 |
+
# `ValueError` in `_lars_path_solver`
|
721 |
+
def test_sparse_coder_dtype_match(data_type, transform_algorithm):
|
722 |
+
# Verify preserving dtype for transform in sparse coder
|
723 |
+
n_components = 6
|
724 |
+
rng = np.random.RandomState(0)
|
725 |
+
dictionary = rng.randn(n_components, n_features)
|
726 |
+
coder = SparseCoder(
|
727 |
+
dictionary.astype(data_type), transform_algorithm=transform_algorithm
|
728 |
+
)
|
729 |
+
code = coder.transform(X.astype(data_type))
|
730 |
+
assert code.dtype == data_type
|
731 |
+
|
732 |
+
|
733 |
+
@pytest.mark.parametrize("fit_algorithm", ("lars", "cd"))
|
734 |
+
@pytest.mark.parametrize(
|
735 |
+
"transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
|
736 |
+
)
|
737 |
+
@pytest.mark.parametrize(
|
738 |
+
"data_type, expected_type",
|
739 |
+
(
|
740 |
+
(np.float32, np.float32),
|
741 |
+
(np.float64, np.float64),
|
742 |
+
(np.int32, np.float64),
|
743 |
+
(np.int64, np.float64),
|
744 |
+
),
|
745 |
+
)
|
746 |
+
def test_dictionary_learning_dtype_match(
|
747 |
+
data_type,
|
748 |
+
expected_type,
|
749 |
+
fit_algorithm,
|
750 |
+
transform_algorithm,
|
751 |
+
):
|
752 |
+
# Verify preserving dtype for fit and transform in dictionary learning class
|
753 |
+
dict_learner = DictionaryLearning(
|
754 |
+
n_components=8,
|
755 |
+
fit_algorithm=fit_algorithm,
|
756 |
+
transform_algorithm=transform_algorithm,
|
757 |
+
random_state=0,
|
758 |
+
)
|
759 |
+
dict_learner.fit(X.astype(data_type))
|
760 |
+
assert dict_learner.components_.dtype == expected_type
|
761 |
+
assert dict_learner.transform(X.astype(data_type)).dtype == expected_type
|
762 |
+
|
763 |
+
|
764 |
+
@pytest.mark.parametrize("fit_algorithm", ("lars", "cd"))
|
765 |
+
@pytest.mark.parametrize(
|
766 |
+
"transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
|
767 |
+
)
|
768 |
+
@pytest.mark.parametrize(
|
769 |
+
"data_type, expected_type",
|
770 |
+
(
|
771 |
+
(np.float32, np.float32),
|
772 |
+
(np.float64, np.float64),
|
773 |
+
(np.int32, np.float64),
|
774 |
+
(np.int64, np.float64),
|
775 |
+
),
|
776 |
+
)
|
777 |
+
def test_minibatch_dictionary_learning_dtype_match(
|
778 |
+
data_type,
|
779 |
+
expected_type,
|
780 |
+
fit_algorithm,
|
781 |
+
transform_algorithm,
|
782 |
+
):
|
783 |
+
# Verify preserving dtype for fit and transform in minibatch dictionary learning
|
784 |
+
dict_learner = MiniBatchDictionaryLearning(
|
785 |
+
n_components=8,
|
786 |
+
batch_size=10,
|
787 |
+
fit_algorithm=fit_algorithm,
|
788 |
+
transform_algorithm=transform_algorithm,
|
789 |
+
max_iter=100,
|
790 |
+
tol=1e-1,
|
791 |
+
random_state=0,
|
792 |
+
)
|
793 |
+
dict_learner.fit(X.astype(data_type))
|
794 |
+
|
795 |
+
assert dict_learner.components_.dtype == expected_type
|
796 |
+
assert dict_learner.transform(X.astype(data_type)).dtype == expected_type
|
797 |
+
assert dict_learner._A.dtype == expected_type
|
798 |
+
assert dict_learner._B.dtype == expected_type
|
799 |
+
|
800 |
+
|
801 |
+
@pytest.mark.parametrize("method", ("lars", "cd"))
|
802 |
+
@pytest.mark.parametrize(
|
803 |
+
"data_type, expected_type",
|
804 |
+
(
|
805 |
+
(np.float32, np.float32),
|
806 |
+
(np.float64, np.float64),
|
807 |
+
(np.int32, np.float64),
|
808 |
+
(np.int64, np.float64),
|
809 |
+
),
|
810 |
+
)
|
811 |
+
def test_dict_learning_dtype_match(data_type, expected_type, method):
|
812 |
+
# Verify output matrix dtype
|
813 |
+
rng = np.random.RandomState(0)
|
814 |
+
n_components = 8
|
815 |
+
code, dictionary, _ = dict_learning(
|
816 |
+
X.astype(data_type),
|
817 |
+
n_components=n_components,
|
818 |
+
alpha=1,
|
819 |
+
random_state=rng,
|
820 |
+
method=method,
|
821 |
+
)
|
822 |
+
assert code.dtype == expected_type
|
823 |
+
assert dictionary.dtype == expected_type
|
824 |
+
|
825 |
+
|
826 |
+
@pytest.mark.parametrize("method", ("lars", "cd"))
|
827 |
+
def test_dict_learning_numerical_consistency(method):
|
828 |
+
# verify numerically consistent among np.float32 and np.float64
|
829 |
+
rtol = 1e-6
|
830 |
+
n_components = 4
|
831 |
+
alpha = 2
|
832 |
+
|
833 |
+
U_64, V_64, _ = dict_learning(
|
834 |
+
X.astype(np.float64),
|
835 |
+
n_components=n_components,
|
836 |
+
alpha=alpha,
|
837 |
+
random_state=0,
|
838 |
+
method=method,
|
839 |
+
)
|
840 |
+
U_32, V_32, _ = dict_learning(
|
841 |
+
X.astype(np.float32),
|
842 |
+
n_components=n_components,
|
843 |
+
alpha=alpha,
|
844 |
+
random_state=0,
|
845 |
+
method=method,
|
846 |
+
)
|
847 |
+
|
848 |
+
# Optimal solution (U*, V*) is not unique.
|
849 |
+
# If (U*, V*) is optimal solution, (-U*,-V*) is also optimal,
|
850 |
+
# and (column permutated U*, row permutated V*) are also optional
|
851 |
+
# as long as holding UV.
|
852 |
+
# So here UV, ||U||_1,1 and sum(||V_k||_2^2) are verified
|
853 |
+
# instead of comparing directly U and V.
|
854 |
+
assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
|
855 |
+
assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
|
856 |
+
assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
|
857 |
+
# verify an obtained solution is not degenerate
|
858 |
+
assert np.mean(U_64 != 0.0) > 0.05
|
859 |
+
assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
|
860 |
+
|
861 |
+
|
862 |
+
@pytest.mark.parametrize("method", ("lars", "cd"))
|
863 |
+
@pytest.mark.parametrize(
|
864 |
+
"data_type, expected_type",
|
865 |
+
(
|
866 |
+
(np.float32, np.float32),
|
867 |
+
(np.float64, np.float64),
|
868 |
+
(np.int32, np.float64),
|
869 |
+
(np.int64, np.float64),
|
870 |
+
),
|
871 |
+
)
|
872 |
+
def test_dict_learning_online_dtype_match(data_type, expected_type, method):
|
873 |
+
# Verify output matrix dtype
|
874 |
+
rng = np.random.RandomState(0)
|
875 |
+
n_components = 8
|
876 |
+
code, dictionary = dict_learning_online(
|
877 |
+
X.astype(data_type),
|
878 |
+
n_components=n_components,
|
879 |
+
alpha=1,
|
880 |
+
batch_size=10,
|
881 |
+
random_state=rng,
|
882 |
+
method=method,
|
883 |
+
)
|
884 |
+
assert code.dtype == expected_type
|
885 |
+
assert dictionary.dtype == expected_type
|
886 |
+
|
887 |
+
|
888 |
+
@pytest.mark.parametrize("method", ("lars", "cd"))
|
889 |
+
def test_dict_learning_online_numerical_consistency(method):
|
890 |
+
# verify numerically consistent among np.float32 and np.float64
|
891 |
+
rtol = 1e-4
|
892 |
+
n_components = 4
|
893 |
+
alpha = 1
|
894 |
+
|
895 |
+
U_64, V_64 = dict_learning_online(
|
896 |
+
X.astype(np.float64),
|
897 |
+
n_components=n_components,
|
898 |
+
max_iter=1_000,
|
899 |
+
alpha=alpha,
|
900 |
+
batch_size=10,
|
901 |
+
random_state=0,
|
902 |
+
method=method,
|
903 |
+
tol=0.0,
|
904 |
+
max_no_improvement=None,
|
905 |
+
)
|
906 |
+
U_32, V_32 = dict_learning_online(
|
907 |
+
X.astype(np.float32),
|
908 |
+
n_components=n_components,
|
909 |
+
max_iter=1_000,
|
910 |
+
alpha=alpha,
|
911 |
+
batch_size=10,
|
912 |
+
random_state=0,
|
913 |
+
method=method,
|
914 |
+
tol=0.0,
|
915 |
+
max_no_improvement=None,
|
916 |
+
)
|
917 |
+
|
918 |
+
# Optimal solution (U*, V*) is not unique.
|
919 |
+
# If (U*, V*) is optimal solution, (-U*,-V*) is also optimal,
|
920 |
+
# and (column permutated U*, row permutated V*) are also optional
|
921 |
+
# as long as holding UV.
|
922 |
+
# So here UV, ||U||_1,1 and sum(||V_k||_2) are verified
|
923 |
+
# instead of comparing directly U and V.
|
924 |
+
assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
|
925 |
+
assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
|
926 |
+
assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
|
927 |
+
# verify an obtained solution is not degenerate
|
928 |
+
assert np.mean(U_64 != 0.0) > 0.05
|
929 |
+
assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
|
930 |
+
|
931 |
+
|
932 |
+
@pytest.mark.parametrize(
|
933 |
+
"estimator",
|
934 |
+
[
|
935 |
+
SparseCoder(X.T),
|
936 |
+
DictionaryLearning(),
|
937 |
+
MiniBatchDictionaryLearning(batch_size=4, max_iter=10),
|
938 |
+
],
|
939 |
+
ids=lambda x: x.__class__.__name__,
|
940 |
+
)
|
941 |
+
def test_get_feature_names_out(estimator):
|
942 |
+
"""Check feature names for dict learning estimators."""
|
943 |
+
estimator.fit(X)
|
944 |
+
n_components = X.shape[1]
|
945 |
+
|
946 |
+
feature_names_out = estimator.get_feature_names_out()
|
947 |
+
estimator_name = estimator.__class__.__name__.lower()
|
948 |
+
assert_array_equal(
|
949 |
+
feature_names_out,
|
950 |
+
[f"{estimator_name}{i}" for i in range(n_components)],
|
951 |
+
)
|
952 |
+
|
953 |
+
|
954 |
+
def test_cd_work_on_joblib_memmapped_data(monkeypatch):
|
955 |
+
monkeypatch.setattr(
|
956 |
+
sklearn.decomposition._dict_learning,
|
957 |
+
"Parallel",
|
958 |
+
partial(Parallel, max_nbytes=100),
|
959 |
+
)
|
960 |
+
|
961 |
+
rng = np.random.RandomState(0)
|
962 |
+
X_train = rng.randn(10, 10)
|
963 |
+
|
964 |
+
dict_learner = DictionaryLearning(
|
965 |
+
n_components=5,
|
966 |
+
random_state=0,
|
967 |
+
n_jobs=2,
|
968 |
+
fit_algorithm="cd",
|
969 |
+
max_iter=50,
|
970 |
+
verbose=True,
|
971 |
+
)
|
972 |
+
|
973 |
+
# This must run and complete without error.
|
974 |
+
dict_learner.fit(X_train)
|
975 |
+
|
976 |
+
|
977 |
+
# TODO(1.6): remove in 1.6
|
978 |
+
def test_xxx():
|
979 |
+
warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed"
|
980 |
+
with pytest.warns(FutureWarning, match=warn_msg):
|
981 |
+
MiniBatchDictionaryLearning(max_iter=None, random_state=0).fit(X)
|
982 |
+
with pytest.warns(FutureWarning, match=warn_msg):
|
983 |
+
dict_learning_online(X, max_iter=None, random_state=0)
|
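The tests above all exercise the same round trip: fit a dictionary, sparse-code data against it, and reconstruct the data as the product of the code and the components. As a minimal standalone sketch of that round trip (editorial, not part of the vendored file above; it assumes a scikit-learn install compatible with this vendored version, and `X_demo` is an illustrative array, not the module-level `X` fixture the tests use):

import numpy as np
from sklearn.decomposition import DictionaryLearning

rng = np.random.RandomState(0)
X_demo = rng.randn(10, 8)  # illustrative data only

dico = DictionaryLearning(
    n_components=5, transform_algorithm="lasso_lars", random_state=0
)
code = dico.fit_transform(X_demo)   # sparse codes, shape (10, 5)
X_hat = code @ dico.components_     # approximate reconstruction, shape (10, 8)
print(code.shape, X_hat.shape)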
venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py
ADDED
@@ -0,0 +1,367 @@
# Author: Vlad Niculae
# License: BSD 3 clause

import sys

import numpy as np
import pytest
from numpy.testing import assert_array_equal

from sklearn.decomposition import PCA, MiniBatchSparsePCA, SparsePCA
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
    assert_allclose,
    assert_array_almost_equal,
    if_safe_multiprocessing_with_blas,
)


def generate_toy_data(n_components, n_samples, image_size, random_state=None):
    n_features = image_size[0] * image_size[1]

    rng = check_random_state(random_state)
    U = rng.randn(n_samples, n_components)
    V = rng.randn(n_components, n_features)

    centers = [(3, 3), (6, 7), (8, 1)]
    sz = [1, 2, 1]
    for k in range(n_components):
        img = np.zeros(image_size)
        xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
        ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
        img[xmin:xmax][:, ymin:ymax] = 1.0
        V[k, :] = img.ravel()

    # Y is defined by: Y = UV + noise
    Y = np.dot(U, V)
    Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])  # Add noise
    return Y, U, V


# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test.


def test_correct_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    spca = SparsePCA(n_components=8, random_state=rng)
    U = spca.fit_transform(X)
    assert spca.components_.shape == (8, 10)
    assert U.shape == (12, 8)
    # test overcomplete decomposition
    spca = SparsePCA(n_components=13, random_state=rng)
    U = spca.fit_transform(X)
    assert spca.components_.shape == (13, 10)
    assert U.shape == (12, 13)


def test_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0)
    spca_lars.fit(Y)

    # Test that CD gives similar results
    spca_lasso = SparsePCA(n_components=3, method="cd", random_state=0, alpha=alpha)
    spca_lasso.fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)


@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0)
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    spca = SparsePCA(
        n_components=3, n_jobs=2, method="lars", alpha=alpha, random_state=0
    ).fit(Y)
    U2 = spca.transform(Y)
    assert not np.all(spca_lars.components_ == 0)
    assert_array_almost_equal(U1, U2)


def test_transform_nan():
    # Test that SparsePCA won't return NaN when there is a feature that is
    # zero in all samples.
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    Y[:, 0] = 0
    estimator = SparsePCA(n_components=8)
    assert not np.any(np.isnan(estimator.fit_transform(Y)))


def test_fit_transform_tall():
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng)  # tall array
    spca_lars = SparsePCA(n_components=3, method="lars", random_state=rng)
    U1 = spca_lars.fit_transform(Y)
    spca_lasso = SparsePCA(n_components=3, method="cd", random_state=rng)
    U2 = spca_lasso.fit(Y).transform(Y)
    assert_array_almost_equal(U1, U2)


def test_initialization():
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    model = SparsePCA(
        n_components=3, U_init=U_init, V_init=V_init, max_iter=0, random_state=rng
    )
    model.fit(rng.randn(5, 4))
    assert_allclose(model.components_, V_init / np.linalg.norm(V_init, axis=1)[:, None])


def test_mini_batch_correct_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    pca = MiniBatchSparsePCA(n_components=8, max_iter=1, random_state=rng)
    U = pca.fit_transform(X)
    assert pca.components_.shape == (8, 10)
    assert U.shape == (12, 8)
    # test overcomplete decomposition
    pca = MiniBatchSparsePCA(n_components=13, max_iter=1, random_state=rng)
    U = pca.fit_transform(X)
    assert pca.components_.shape == (13, 10)
    assert U.shape == (12, 13)


# XXX: test always skipped
@pytest.mark.skipif(True, reason="skipping mini_batch_fit_transform.")
def test_mini_batch_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    if sys.platform == "win32":  # fake parallelism for win32
        import joblib

        _mp = joblib.parallel.multiprocessing
        joblib.parallel.multiprocessing = None
        try:
            spca = MiniBatchSparsePCA(
                n_components=3, n_jobs=2, alpha=alpha, random_state=0
            )
            U2 = spca.fit(Y).transform(Y)
        finally:
            joblib.parallel.multiprocessing = _mp
    else:  # we can efficiently use parallelism
        spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0)
        U2 = spca.fit(Y).transform(Y)
    assert not np.all(spca_lars.components_ == 0)
    assert_array_almost_equal(U1, U2)
    # Test that CD gives similar results
    spca_lasso = MiniBatchSparsePCA(
        n_components=3, method="cd", alpha=alpha, random_state=0
    ).fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)


def test_scaling_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
    spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=rng)
    results_train = spca_lars.fit_transform(Y)
    results_test = spca_lars.transform(Y[:10])
    assert_allclose(results_train[0], results_test[0])


def test_pca_vs_spca():
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
    Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
    spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2)
    pca = PCA(n_components=2)
    pca.fit(Y)
    spca.fit(Y)
    results_test_pca = pca.transform(Z)
    results_test_spca = spca.transform(Z)
    assert_allclose(
        np.abs(spca.components_.dot(pca.components_.T)), np.eye(2), atol=1e-5
    )
    results_test_pca *= np.sign(results_test_pca[0, :])
    results_test_spca *= np.sign(results_test_spca[0, :])
    assert_allclose(results_test_pca, results_test_spca)


@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
@pytest.mark.parametrize("n_components", [None, 3])
def test_spca_n_components_(SPCA, n_components):
    rng = np.random.RandomState(0)
    n_samples, n_features = 12, 10
    X = rng.randn(n_samples, n_features)

    model = SPCA(n_components=n_components).fit(X)

    if n_components is not None:
        assert model.n_components_ == n_components
    else:
        assert model.n_components_ == n_features


@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA))
@pytest.mark.parametrize("method", ("lars", "cd"))
@pytest.mark.parametrize(
    "data_type, expected_type",
    (
        (np.float32, np.float32),
        (np.float64, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
    ),
)
def test_sparse_pca_dtype_match(SPCA, method, data_type, expected_type):
    # Verify output matrix dtype
    n_samples, n_features, n_components = 12, 10, 3
    rng = np.random.RandomState(0)
    input_array = rng.randn(n_samples, n_features).astype(data_type)
    model = SPCA(n_components=n_components, method=method)
    transformed = model.fit_transform(input_array)

    assert transformed.dtype == expected_type
    assert model.components_.dtype == expected_type


@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA))
@pytest.mark.parametrize("method", ("lars", "cd"))
def test_sparse_pca_numerical_consistency(SPCA, method):
    # Verify numerical consistency between np.float32 and np.float64
    rtol = 1e-3
    alpha = 2
    n_samples, n_features, n_components = 12, 10, 3
    rng = np.random.RandomState(0)
    input_array = rng.randn(n_samples, n_features)

    model_32 = SPCA(
        n_components=n_components, alpha=alpha, method=method, random_state=0
    )
    transformed_32 = model_32.fit_transform(input_array.astype(np.float32))

    model_64 = SPCA(
        n_components=n_components, alpha=alpha, method=method, random_state=0
    )
    transformed_64 = model_64.fit_transform(input_array.astype(np.float64))

    assert_allclose(transformed_64, transformed_32, rtol=rtol)
    assert_allclose(model_64.components_, model_32.components_, rtol=rtol)


@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
def test_spca_feature_names_out(SPCA):
    """Check feature names out for *SparsePCA."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 12, 10
    X = rng.randn(n_samples, n_features)

    model = SPCA(n_components=4).fit(X)
    names = model.get_feature_names_out()

    estimator_name = SPCA.__name__.lower()
    assert_array_equal([f"{estimator_name}{i}" for i in range(4)], names)


# TODO(1.6): remove in 1.6
def test_spca_max_iter_None_deprecation():
    """Check that we raise a warning for the deprecation of `max_iter=None`."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 12, 10
    X = rng.randn(n_samples, n_features)

    warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed"
    with pytest.warns(FutureWarning, match=warn_msg):
        MiniBatchSparsePCA(max_iter=None).fit(X)


def test_spca_early_stopping(global_random_seed):
    """Check that `tol` and `max_no_improvement` act as early stopping."""
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features = 50, 10
    X = rng.randn(n_samples, n_features)

    # vary the tolerance to force the early stopping of one of the models
    model_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=0.5, random_state=global_random_seed
    ).fit(X)
    model_not_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=1e-3, random_state=global_random_seed
    ).fit(X)
    assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_

    # use a small `max_no_improvement` for one model to check that it
    # triggers early stopping
    model_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed
    ).fit(X)
    model_not_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed
    ).fit(X)
    assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_


def test_equivalence_components_pca_spca(global_random_seed):
    """Check the equivalence of the components found by PCA and SparsePCA.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/23932
    """
    rng = np.random.RandomState(global_random_seed)
    X = rng.randn(50, 4)

    n_components = 2
    pca = PCA(
        n_components=n_components,
        svd_solver="randomized",
        random_state=0,
    ).fit(X)
    spca = SparsePCA(
        n_components=n_components,
        method="lars",
        ridge_alpha=0,
        alpha=0,
        random_state=0,
    ).fit(X)

    assert_allclose(pca.components_, spca.components_)


def test_sparse_pca_inverse_transform():
    """Check that `inverse_transform` in `SparsePCA` and `PCA` are similar."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 5
    X = rng.randn(n_samples, n_features)

    n_components = 2
    spca = SparsePCA(
        n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0
    )
    pca = PCA(n_components=n_components, random_state=0)
    X_trans_spca = spca.fit_transform(X)
    X_trans_pca = pca.fit_transform(X)
    assert_allclose(
        spca.inverse_transform(X_trans_spca), pca.inverse_transform(X_trans_pca)
    )


@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
def test_transform_inverse_transform_round_trip(SPCA):
    """Check the `transform` and `inverse_transform` round trip with no loss of
    information.
    """
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 5
    X = rng.randn(n_samples, n_features)

    n_components = n_features
    spca = SPCA(
        n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0
    )
    X_trans_spca = spca.fit_transform(X)
    assert_allclose(spca.inverse_transform(X_trans_spca), X)
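As with the dictionary-learning tests, a minimal standalone sketch of the SparsePCA API covered above (editorial, not part of the vendored file; `X_demo` is an illustrative array). With alpha=0 and ridge_alpha=0 the estimator recovers the PCA subspace, which is what test_pca_vs_spca checks; a larger alpha yields sparser components:

import numpy as np
from sklearn.decomposition import SparsePCA

rng = np.random.RandomState(0)
X_demo = rng.randn(12, 10)  # illustrative data only

spca = SparsePCA(n_components=3, alpha=1, random_state=0)
U = spca.fit_transform(X_demo)          # shape (12, 3)
print(U.shape, spca.components_.shape)  # (12, 3) (3, 10)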
venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.47 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (5.5 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc
ADDED
Binary file (101 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc
ADDED
Binary file (62.4 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_regression.cpython-310.pyc
ADDED
Binary file (48.4 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc
ADDED
Binary file (28.2 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc
ADDED
Binary file (70.1 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.27 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_bicluster.cpython-310.pyc
ADDED
Binary file (3.91 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc
ADDED
Binary file (38 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_unsupervised.cpython-310.pyc
ADDED
Binary file (14.7 kB)
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_supervised.py
ADDED
@@ -0,0 +1,1298 @@
1 |
+
"""Utilities to evaluate the clustering performance of models.
|
2 |
+
|
3 |
+
Functions named as *_score return a scalar value to maximize: the higher the
|
4 |
+
better.
|
5 |
+
"""
|
6 |
+
|
7 |
+
# Authors: Olivier Grisel <[email protected]>
|
8 |
+
# Wei LI <[email protected]>
|
9 |
+
# Diego Molla <[email protected]>
|
10 |
+
# Arnaud Fouchet <[email protected]>
|
11 |
+
# Thierry Guillemot <[email protected]>
|
12 |
+
# Gregory Stupp <[email protected]>
|
13 |
+
# Joel Nothman <[email protected]>
|
14 |
+
# Arya McCarthy <[email protected]>
|
15 |
+
# Uwe F Mayer <[email protected]>
|
16 |
+
# License: BSD 3 clause
|
17 |
+
|
18 |
+
|
19 |
+
import warnings
|
20 |
+
from math import log
|
21 |
+
from numbers import Real
|
22 |
+
|
23 |
+
import numpy as np
|
24 |
+
from scipy import sparse as sp
|
25 |
+
|
26 |
+
from ...utils._param_validation import Interval, StrOptions, validate_params
|
27 |
+
from ...utils.multiclass import type_of_target
|
28 |
+
from ...utils.validation import check_array, check_consistent_length
|
29 |
+
from ._expected_mutual_info_fast import expected_mutual_information
|
30 |
+
|
31 |
+
|
32 |
+
def check_clusterings(labels_true, labels_pred):
|
33 |
+
"""Check that the labels arrays are 1D and of same dimension.
|
34 |
+
|
35 |
+
Parameters
|
36 |
+
----------
|
37 |
+
labels_true : array-like of shape (n_samples,)
|
38 |
+
The true labels.
|
39 |
+
|
40 |
+
labels_pred : array-like of shape (n_samples,)
|
41 |
+
The predicted labels.
|
42 |
+
"""
|
43 |
+
labels_true = check_array(
|
44 |
+
labels_true,
|
45 |
+
ensure_2d=False,
|
46 |
+
ensure_min_samples=0,
|
47 |
+
dtype=None,
|
48 |
+
)
|
49 |
+
|
50 |
+
labels_pred = check_array(
|
51 |
+
labels_pred,
|
52 |
+
ensure_2d=False,
|
53 |
+
ensure_min_samples=0,
|
54 |
+
dtype=None,
|
55 |
+
)
|
56 |
+
|
57 |
+
type_label = type_of_target(labels_true)
|
58 |
+
type_pred = type_of_target(labels_pred)
|
59 |
+
|
60 |
+
if "continuous" in (type_pred, type_label):
|
61 |
+
msg = (
|
62 |
+
"Clustering metrics expects discrete values but received"
|
63 |
+
f" {type_label} values for label, and {type_pred} values "
|
64 |
+
"for target"
|
65 |
+
)
|
66 |
+
warnings.warn(msg, UserWarning)
|
67 |
+
|
68 |
+
# input checks
|
69 |
+
if labels_true.ndim != 1:
|
70 |
+
raise ValueError("labels_true must be 1D: shape is %r" % (labels_true.shape,))
|
71 |
+
if labels_pred.ndim != 1:
|
72 |
+
raise ValueError("labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
|
73 |
+
check_consistent_length(labels_true, labels_pred)
|
74 |
+
|
75 |
+
return labels_true, labels_pred
|
76 |
+
|
77 |
+
|
78 |
+
def _generalized_average(U, V, average_method):
|
79 |
+
"""Return a particular mean of two numbers."""
|
80 |
+
if average_method == "min":
|
81 |
+
return min(U, V)
|
82 |
+
elif average_method == "geometric":
|
83 |
+
return np.sqrt(U * V)
|
84 |
+
elif average_method == "arithmetic":
|
85 |
+
return np.mean([U, V])
|
86 |
+
elif average_method == "max":
|
87 |
+
return max(U, V)
|
88 |
+
else:
|
89 |
+
raise ValueError(
|
90 |
+
"'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'"
|
91 |
+
)
|
92 |
+
|
93 |
+
|
94 |
+
@validate_params(
|
95 |
+
{
|
96 |
+
"labels_true": ["array-like", None],
|
97 |
+
"labels_pred": ["array-like", None],
|
98 |
+
"eps": [Interval(Real, 0, None, closed="left"), None],
|
99 |
+
"sparse": ["boolean"],
|
100 |
+
"dtype": "no_validation", # delegate the validation to SciPy
|
101 |
+
},
|
102 |
+
prefer_skip_nested_validation=True,
|
103 |
+
)
|
104 |
+
def contingency_matrix(
|
105 |
+
labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64
|
106 |
+
):
|
107 |
+
"""Build a contingency matrix describing the relationship between labels.
|
108 |
+
|
109 |
+
Parameters
|
110 |
+
----------
|
111 |
+
labels_true : array-like of shape (n_samples,)
|
112 |
+
Ground truth class labels to be used as a reference.
|
113 |
+
|
114 |
+
labels_pred : array-like of shape (n_samples,)
|
115 |
+
Cluster labels to evaluate.
|
116 |
+
|
117 |
+
eps : float, default=None
|
118 |
+
If a float, that value is added to all values in the contingency
|
119 |
+
matrix. This helps to stop NaN propagation.
|
120 |
+
If ``None``, nothing is adjusted.
|
121 |
+
|
122 |
+
sparse : bool, default=False
|
123 |
+
If `True`, return a sparse CSR continency matrix. If `eps` is not
|
124 |
+
`None` and `sparse` is `True` will raise ValueError.
|
125 |
+
|
126 |
+
.. versionadded:: 0.18
|
127 |
+
|
128 |
+
dtype : numeric type, default=np.int64
|
129 |
+
Output dtype. Ignored if `eps` is not `None`.
|
130 |
+
|
131 |
+
.. versionadded:: 0.24
|
132 |
+
|
133 |
+
Returns
|
134 |
+
-------
|
135 |
+
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
|
136 |
+
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
|
137 |
+
true class :math:`i` and in predicted class :math:`j`. If
|
138 |
+
``eps is None``, the dtype of this array will be integer unless set
|
139 |
+
otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype
|
140 |
+
will be float.
|
141 |
+
Will be a ``sklearn.sparse.csr_matrix`` if ``sparse=True``.
|
142 |
+
|
143 |
+
Examples
|
144 |
+
--------
|
145 |
+
>>> from sklearn.metrics.cluster import contingency_matrix
|
146 |
+
>>> labels_true = [0, 0, 1, 1, 2, 2]
|
147 |
+
>>> labels_pred = [1, 0, 2, 1, 0, 2]
|
148 |
+
>>> contingency_matrix(labels_true, labels_pred)
|
149 |
+
array([[1, 1, 0],
|
150 |
+
[0, 1, 1],
|
151 |
+
[1, 0, 1]])
|
152 |
+
"""
|
153 |
+
|
154 |
+
if eps is not None and sparse:
|
155 |
+
raise ValueError("Cannot set 'eps' when sparse=True")
|
156 |
+
|
157 |
+
classes, class_idx = np.unique(labels_true, return_inverse=True)
|
158 |
+
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
|
159 |
+
n_classes = classes.shape[0]
|
160 |
+
n_clusters = clusters.shape[0]
|
161 |
+
# Using coo_matrix to accelerate simple histogram calculation,
|
162 |
+
# i.e. bins are consecutive integers
|
163 |
+
# Currently, coo_matrix is faster than histogram2d for simple cases
|
164 |
+
contingency = sp.coo_matrix(
|
165 |
+
(np.ones(class_idx.shape[0]), (class_idx, cluster_idx)),
|
166 |
+
shape=(n_classes, n_clusters),
|
167 |
+
dtype=dtype,
|
168 |
+
)
|
169 |
+
if sparse:
|
170 |
+
contingency = contingency.tocsr()
|
171 |
+
contingency.sum_duplicates()
|
172 |
+
else:
|
173 |
+
contingency = contingency.toarray()
|
174 |
+
if eps is not None:
|
175 |
+
# don't use += as contingency is integer
|
176 |
+
contingency = contingency + eps
|
177 |
+
return contingency
|
178 |
+
|
179 |
+
|
180 |
+
# clustering measures
|
181 |
+
|
182 |
+
|
183 |
+
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def pair_confusion_matrix(labels_true, labels_pred):
    """Pair confusion matrix arising from two clusterings [1]_.

    The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix
    between two clusterings by considering all pairs of samples and counting
    pairs that are assigned into the same or into different clusters under
    the true and predicted clusterings.

    Considering a pair of samples that is clustered together as a positive
    pair, then as in binary classification the count of true negatives is
    :math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is
    :math:`C_{11}` and false positives is :math:`C_{01}`.

    Read more in the :ref:`User Guide <pair_confusion_matrix>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    C : ndarray of shape (2, 2), dtype=np.int64
        The contingency matrix.

    See Also
    --------
    sklearn.metrics.rand_score : Rand Score.
    sklearn.metrics.adjusted_rand_score : Adjusted Rand Score.
    sklearn.metrics.adjusted_mutual_info_score : Adjusted Mutual Information.

    References
    ----------
    .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
       Journal of Classification 2, 193–218 (1985).
       <10.1007/BF01908075>`

    Examples
    --------
    Perfectly matching labelings have all non-zero entries on the
    diagonal regardless of actual label values:

      >>> from sklearn.metrics.cluster import pair_confusion_matrix
      >>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0])
      array([[8, 0],
             [0, 4]]...

    Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized, and
    have some off-diagonal non-zero entries:

      >>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1])
      array([[8, 2],
             [0, 2]]...

    Note that the matrix is not symmetric.
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = np.int64(labels_true.shape[0])

    # Computation using the contingency data
    contingency = contingency_matrix(
        labels_true, labels_pred, sparse=True, dtype=np.int64
    )
    n_c = np.ravel(contingency.sum(axis=1))
    n_k = np.ravel(contingency.sum(axis=0))
    sum_squares = (contingency.data**2).sum()
    C = np.empty((2, 2), dtype=np.int64)
    C[1, 1] = sum_squares - n_samples
    C[0, 1] = contingency.dot(n_k).sum() - sum_squares
    C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares
    C[0, 0] = n_samples**2 - C[0, 1] - C[1, 0] - sum_squares
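    # Reading the algebra above: sum_squares counts ordered pairs (including
    # each sample paired with itself) that share a cell of the contingency
    # table, so subtracting n_samples leaves the ordered true-positive pairs;
    # the off-diagonal cells follow from the row and column margins, and
    # C[0, 0] is whatever remains of the n_samples**2 ordered pairs.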
    return C


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def rand_score(labels_true, labels_pred):
    """Rand index.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings [1]_ [2]_.

    The raw RI score [3]_ is:

        RI = (number of agreeing pairs) / (number of pairs)

    Read more in the :ref:`User Guide <rand_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    RI : float
        Similarity score between 0.0 and 1.0, inclusive; 1.0 stands for
        perfect match.

    See Also
    --------
    adjusted_rand_score : Adjusted Rand Score.
    adjusted_mutual_info_score : Adjusted Mutual Information.

    References
    ----------
    .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
       Journal of Classification 2, 193–218 (1985).
       <10.1007/BF01908075>`.

    .. [2] `Wikipedia: Simple Matching Coefficient
       <https://en.wikipedia.org/wiki/Simple_matching_coefficient>`_

    .. [3] `Wikipedia: Rand Index <https://en.wikipedia.org/wiki/Rand_index>`_

    Examples
    --------
    Perfectly matching labelings have a score of 1 even when the actual
    label values differ:

      >>> from sklearn.metrics.cluster import rand_score
      >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized:

      >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1])
      0.83...
    """
    contingency = pair_confusion_matrix(labels_true, labels_pred)
    numerator = contingency.diagonal().sum()
    denominator = contingency.sum()
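    # contingency is the 2x2 pair confusion matrix: its diagonal counts the
    # agreeing ordered pairs and its total is n * (n - 1), all ordered pairs,
    # so the ratio below is exactly the Rand index.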

    if numerator == denominator or denominator == 0:
        # Special limit cases: no clustering since the data is not split;
        # or trivial clustering where each document is assigned a unique
        # cluster. These are perfect matches hence return 1.0.
        return 1.0

    return numerator / denominator


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.

    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to
    0.0 for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to
    a permutation). The adjusted Rand index is bounded below by -0.5 for
    especially discordant clusterings.

    ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=int
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=int
        Cluster labels to evaluate.

    Returns
    -------
    ARI : float
        Similarity score between -0.5 and 1.0. Random labelings have an ARI
        close to 0.0. 1.0 stands for perfect match.

    See Also
    --------
    adjusted_mutual_info_score : Adjusted Mutual Information.

    References
    ----------
    .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions,
       Journal of Classification 1985
       https://link.springer.com/article/10.1007%2FBF01908075

    .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie
       adjusted Rand index, Psychological Methods 2004

    .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    .. [Chacon] :doi:`Minimum adjusted Rand index for two clusterings of a given size,
       2022, J. E. Chacón and A. I. Rastrojo <10.1007/s11634-022-00491-w>`

    Examples
    --------
    Perfectly matching labelings have a score of 1 even when the actual
    label values differ:

      >>> from sklearn.metrics.cluster import adjusted_rand_score
      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized::

      >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])
      0.57...

    ARI is symmetric, so labelings that have pure clusters with members
    coming from the same classes but unnecessary splits are penalized::

      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])
      0.57...

    If classes members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::

      >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    ARI may take a negative value for especially discordant labelings that
    are a worse choice than the expected value of random labels::

      >>> adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1])
      -0.5
    """
    (tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred)
    # convert to Python integer types, to avoid overflow or underflow
    tn, fp, fn, tp = int(tn), int(fp), int(fn), int(tp)

    # Special cases: empty data or full agreement
    if fn == 0 and fp == 0:
        return 1.0

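    # Closed form of (RI - E[RI]) / (max(RI) - E[RI]) expressed through the
    # pair confusion counts: numerator and denominator are multiplied out so
    # only exact integer pair counts appear before the final division.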
    return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
        "beta": [Interval(Real, 0, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):
    """Compute the homogeneity, completeness and V-Measure scores at once.

    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of the ground
    truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable.

    Those 3 metrics are independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score values in any way.

    V-Measure is furthermore symmetric: swapping ``labels_true`` and
    ``label_pred`` will give the same score. This does not hold for
    homogeneity and completeness. V-Measure is identical to
    :func:`normalized_mutual_info_score` with the arithmetic averaging
    method.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.

    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    v_measure : float
        Harmonic mean of the first two.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    completeness_score : Completeness metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    Examples
    --------
    >>> from sklearn.metrics import homogeneity_completeness_v_measure
    >>> y_true, y_pred = [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 2, 2]
    >>> homogeneity_completeness_v_measure(y_true, y_pred)
    (0.71..., 0.77..., 0.73...)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    entropy_C = entropy(labels_true)
    entropy_K = entropy(labels_pred)

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    MI = mutual_info_score(None, None, contingency=contingency)

    homogeneity = MI / (entropy_C) if entropy_C else 1.0
    completeness = MI / (entropy_K) if entropy_K else 1.0
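    # With beta = 1 the weighted expression below reduces to the plain
    # harmonic mean 2 * h * c / (h + c); beta > 1 tilts the score toward
    # completeness, beta < 1 toward homogeneity.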

    if homogeneity + completeness == 0.0:
        v_measure_score = 0.0
    else:
        v_measure_score = (
            (1 + beta)
            * homogeneity
            * completeness
            / (beta * homogeneity + completeness)
        )

    return homogeneity, completeness, v_measure_score


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`completeness_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.

    See Also
    --------
    completeness_score : Completeness metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------

    Perfect labelings are homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that further split classes into more clusters can be
    perfectly homogeneous::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
      1.000000
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
      1.000000

    Clusters that include samples from different classes do not make for a
    homogeneous labeling::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
      0.0...
    """
    return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def completeness_score(labels_true, labels_pred):
    """Compute completeness metric of a cluster labeling given a ground truth.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`homogeneity_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------

    Perfect labelings are complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that assign all classes members to the same clusters
    are still complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
      1.0
      >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
      0.999...

    If classes members are split across different clusters, the
    assignment cannot be complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0
      >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0
    """
    return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
        "beta": [Interval(Real, 0, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def v_measure_score(labels_true, labels_pred, *, beta=1.0):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score` with
    the ``'arithmetic'`` option for averaging.

    The V-measure is the harmonic mean between homogeneity and completeness::

        v = (1 + beta) * homogeneity * completeness
             / (beta * homogeneity + completeness)

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    v_measure : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    completeness_score : Completeness metric of cluster labeling.
    normalized_mutual_info_score : Normalized Mutual Information.

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
      0.8...
      >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
      0.66...

    Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
    and thus penalize V-measure as well::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
      0.8...
      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
      0.66...

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-Measure is null::

      >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0...

    Clusters that include samples from totally different classes totally
    destroy the homogeneity of the labeling, hence::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
      0.0...
    """
    return homogeneity_completeness_v_measure(labels_true, labels_pred, beta=beta)[2]


@validate_params(
    {
        "labels_true": ["array-like", None],
        "labels_pred": ["array-like", None],
        "contingency": ["array-like", "sparse matrix", None],
    },
    prefer_skip_nested_validation=True,
)
def mutual_info_score(labels_true, labels_pred, *, contingency=None):
    """Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels
    of the same data. Where :math:`|U_i|` is the number of samples
    in cluster :math:`U_i` and :math:`|V_j|` is the number of
    samples in cluster :math:`V_j`, the Mutual Information
    between clusterings :math:`U` and :math:`V` is given as:

    .. math::

        MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N}
        \\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|}

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching :math:`U` (i.e.
    ``label_true``) with :math:`V` (i.e. ``label_pred``) will return the
    same score value. This can be useful to measure the agreement of two
    independent label assignment strategies on the same dataset when the
    real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        A clustering of the data into disjoint subsets, called :math:`U` in
        the above formula.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        A clustering of the data into disjoint subsets, called :math:`V` in
        the above formula.

    contingency : {array-like, sparse matrix} of shape \
            (n_classes_true, n_classes_pred), default=None
        A contingency matrix given by the
        :func:`~sklearn.metrics.cluster.contingency_matrix` function. If value
        is ``None``, it will be computed, otherwise the given value is used,
        with ``labels_true`` and ``labels_pred`` ignored.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value, measured in nats using the
        natural logarithm.

    See Also
    --------
    adjusted_mutual_info_score : Adjusted against chance Mutual Information.
    normalized_mutual_info_score : Normalized Mutual Information.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).

    Examples
    --------
    >>> from sklearn.metrics import mutual_info_score
    >>> labels_true = [0, 1, 1, 0, 1, 0]
    >>> labels_pred = [0, 1, 0, 0, 1, 1]
    >>> mutual_info_score(labels_true, labels_pred)
    0.056...
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        contingency = check_array(
            contingency,
            accept_sparse=["csr", "csc", "coo"],
            dtype=[int, np.int32, np.int64],
        )

    if isinstance(contingency, np.ndarray):
        # For an array
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    else:
        # For a sparse matrix
        nzx, nzy, nz_val = sp.find(contingency)

    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))

    # Since MI <= min(H(X), H(Y)), any labelling with zero entropy, i.e. containing a
    # single cluster, implies MI = 0
    if pi.size == 1 or pj.size == 1:
        return 0.0

    log_contingency_nm = np.log(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    outer = pi.take(nzx).astype(np.int64, copy=False) * pj.take(nzy).astype(
        np.int64, copy=False
    )
    log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
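    # log_outer equals log(N**2 / (a_i * b_j)) per non-zero cell; combined
    # with the term below, each summand becomes p_ij * log(p_ij / (p_i p_j)),
    # the standard MI contribution, without forming the full outer product.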
    mi = (
        contingency_nm * (log_contingency_nm - log(contingency_sum))
        + contingency_nm * log_outer
    )
    mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi)
    return np.clip(mi.sum(), 0.0, None)


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
        "average_method": [StrOptions({"arithmetic", "max", "min", "geometric"})],
    },
    prefer_skip_nested_validation=True,
)
def adjusted_mutual_info_score(
    labels_true, labels_pred, *, average_method="arithmetic"
):
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact that
    the MI is generally higher for two clusterings with a larger number of
    clusters, regardless of whether there is actually more information shared.
    For two clusterings :math:`U` and :math:`V`, the AMI is given as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))]

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching :math:`U` (``label_true``)
    with :math:`V` (``labels_pred``) will return the same score value. This can
    be useful to measure the agreement of two independent label assignment
    strategies on the same dataset when the real ground truth is not known.

    Be mindful that this function is an order of magnitude slower than other
    metrics, such as the Adjusted Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets, called :math:`U` in
        the above formula.

    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets, called :math:`V` in
        the above formula.

    average_method : {'min', 'geometric', 'arithmetic', 'max'}, default='arithmetic'
        How to compute the normalizer in the denominator.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'max' to
           'arithmetic'.

    Returns
    -------
    ami : float (upper-bounded by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI around 0 on average hence can be negative. The
        value is in adjusted nats (based on the natural logarithm).

    See Also
    --------
    adjusted_rand_score : Adjusted Rand Index.
    mutual_info_score : Mutual Information (not adjusted for chance).

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_

    Examples
    --------

    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import adjusted_mutual_info_score
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      ... # doctest: +SKIP
      1.0
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      ... # doctest: +SKIP
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::

      >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      ... # doctest: +SKIP
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)

    # Special limit cases: no clustering since the data is not split.
    # It corresponds to both labellings having zero entropy.
    # This is a perfect match hence return 1.0.
    if (
        classes.shape[0] == clusters.shape[0] == 1
        or classes.shape[0] == clusters.shape[0] == 0
    ):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate the expected value for the mutual information
    emi = expected_mutual_information(contingency, n_samples)
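    # expected_mutual_information evaluates E[MI] under the permutation model
    # of Vinh et al. (2010): random labelings drawn with the same marginal
    # cluster sizes, which is why it needs n_samples as well.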
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    normalizer = _generalized_average(h_true, h_pred, average_method)
    denominator = normalizer - emi
    # Avoid 0.0 / 0.0 when expectation equals maximum, i.e. a perfect match.
    # normalizer should always be >= emi, but because of floating-point
    # representation, sometimes emi is slightly larger. Correct this
    # by preserving the sign.
    if denominator < 0:
        denominator = min(denominator, -np.finfo("float64").eps)
    else:
        denominator = max(denominator, np.finfo("float64").eps)
    ami = (mi - emi) / denominator
    return ami


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
        "average_method": [StrOptions({"arithmetic", "max", "min", "geometric"})],
    },
    prefer_skip_nested_validation=True,
)
def normalized_mutual_info_score(
    labels_true, labels_pred, *, average_method="arithmetic"
):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by some generalized mean of ``H(labels_true)``
    and ``H(labels_pred)``, defined by the `average_method`.

    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets.

    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets.

    average_method : {'min', 'geometric', 'arithmetic', 'max'}, default='arithmetic'
        How to compute the normalizer in the denominator.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'geometric' to
           'arithmetic'.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0 in normalized nats (based on the natural
        logarithm). 1.0 stands for perfectly complete labeling.

    See Also
    --------
    v_measure_score : V-Measure (NMI with arithmetic mean option).
    adjusted_rand_score : Adjusted Rand Index.
    adjusted_mutual_info_score : Adjusted Mutual Information (adjusted
        against chance).

    Examples
    --------

    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import normalized_mutual_info_score
      >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      ... # doctest: +SKIP
      1.0
      >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      ... # doctest: +SKIP
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::

      >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      ... # doctest: +SKIP
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)

    # Special limit cases: no clustering since the data is not split.
    # It corresponds to both labellings having zero entropy.
    # This is a perfect match hence return 1.0.
    if (
        classes.shape[0] == clusters.shape[0] == 1
        or classes.shape[0] == clusters.shape[0] == 0
    ):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64, copy=False)
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)

    # At this point mi = 0 can't be a perfect match (the special case of a single
    # cluster has been dealt with before). Hence, if mi = 0, the nmi must be 0 whatever
    # the normalization.
    if mi == 0:
        return 0.0

    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)

    normalizer = _generalized_average(h_true, h_pred, average_method)
    return mi / normalizer


@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
        "sparse": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False):
    """Measure the similarity of two clusterings of a set of points.

    .. versionadded:: 0.18

    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
    the precision and recall::

        FMI = TP / sqrt((TP + FP) * (TP + FN))

    Where ``TP`` is the number of **True Positive** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` and not in ``labels_pred``) and ``FN`` is the number of
    **False Negative** (i.e. the number of pairs of points that belong to the
    same clusters in ``labels_pred`` and not in ``labels_true``).

    The score ranges from 0 to 1. A high value indicates a good similarity
    between the two clusterings.

    Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=int
        A clustering of the data into disjoint subsets.

    labels_pred : array-like of shape (n_samples,), dtype=int
        A clustering of the data into disjoint subsets.

    sparse : bool, default=False
        Compute contingency matrix internally with sparse matrix.

    Returns
    -------
    score : float
        The resulting Fowlkes-Mallows score.

    References
    ----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
       hierarchical clusterings". Journal of the American Statistical
       Association
       <https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_

    .. [2] `Wikipedia entry for the Fowlkes-Mallows Index
       <https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_

    Examples
    --------

    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import fowlkes_mallows_score
      >>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally random, hence the FMI is null::

      >>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    (n_samples,) = labels_true.shape

    c = contingency_matrix(labels_true, labels_pred, sparse=True)
    c = c.astype(np.int64, copy=False)
    tk = np.dot(c.data, c.data) - n_samples
    pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
    qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
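    # tk, pk and qk are twice the unordered pair counts: tk ~ 2 * TP,
    # pk ~ 2 * (TP + FP), qk ~ 2 * (TP + FN), so the product of square roots
    # below reduces to TP / sqrt((TP + FP) * (TP + FN)) from the docstring.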
    return np.sqrt(tk / pk) * np.sqrt(tk / qk) if tk != 0.0 else 0.0


@validate_params(
    {
        "labels": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def entropy(labels):
    """Calculate the entropy for a labeling.

    Parameters
    ----------
    labels : array-like of shape (n_samples,), dtype=int
        The labels.

    Returns
    -------
    entropy : float
        The entropy for a labeling.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    if len(labels) == 0:
        return 1.0
    label_idx = np.unique(labels, return_inverse=True)[1]
    pi = np.bincount(label_idx).astype(np.float64)
    pi = pi[pi > 0]
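    # bincount over the unique-inverse indices yields one positive count per
    # distinct label, so the pi > 0 filter is defensive; pi holds raw counts
    # that are normalized by pi_sum below.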

    # single cluster => zero entropy
    if pi.size == 1:
        return 0.0

    pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
    return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (197 Bytes).

venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc
ADDED
Binary file (1.58 kB).

venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_common.cpython-310.pyc
ADDED
Binary file (5.55 kB).

venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_supervised.cpython-310.pyc
ADDED
Binary file (14.1 kB).

venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_unsupervised.cpython-310.pyc
ADDED
Binary file (9.11 kB).

venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_bicluster.py
ADDED
@@ -0,0 +1,56 @@
"""Testing for bicluster metrics module"""

import numpy as np

from sklearn.metrics import consensus_score
from sklearn.metrics.cluster._bicluster import _jaccard
from sklearn.utils._testing import assert_almost_equal


def test_jaccard():
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])

    assert _jaccard(a1, a1, a1, a1) == 1
    assert _jaccard(a1, a1, a2, a2) == 0.25
    assert _jaccard(a1, a1, a3, a3) == 1.0 / 7
    assert _jaccard(a1, a1, a4, a4) == 0
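    # e.g. the a1 / a2 case: the two biclusters cover 2 * 2 = 4 and
    # 4 * 4 = 16 cells and overlap on 4, so Jaccard = 4 / (4 + 16 - 4) = 0.25.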


def test_consensus_score():
    a = [[True, True, False, False], [False, False, True, True]]
    b = a[::-1]

    assert consensus_score((a, a), (a, a)) == 1
    assert consensus_score((a, a), (b, b)) == 1
    assert consensus_score((a, b), (a, b)) == 1
    assert consensus_score((a, b), (b, a)) == 1

    assert consensus_score((a, a), (b, a)) == 0
    assert consensus_score((a, a), (a, b)) == 0
    assert consensus_score((b, b), (a, b)) == 0
    assert consensus_score((b, b), (b, a)) == 0


def test_consensus_score_issue2445():
    """Different number of biclusters in A and B"""
    a_rows = np.array(
        [
            [True, True, False, False],
            [False, False, True, True],
            [False, False, False, True],
        ]
    )
    a_cols = np.array(
        [
            [True, True, False, False],
            [False, False, True, True],
            [False, False, False, True],
        ]
    )
    idx = [0, 2]
    s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
    # B contains 2 of the 3 biclusters in A, so score should be 2/3
    assert_almost_equal(s, 2.0 / 3.0)
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_common.py
ADDED
@@ -0,0 +1,219 @@
from functools import partial
from itertools import chain

import numpy as np
import pytest

from sklearn.metrics.cluster import (
    adjusted_mutual_info_score,
    adjusted_rand_score,
    calinski_harabasz_score,
    completeness_score,
    davies_bouldin_score,
    fowlkes_mallows_score,
    homogeneity_score,
    mutual_info_score,
    normalized_mutual_info_score,
    rand_score,
    silhouette_score,
    v_measure_score,
)
from sklearn.utils._testing import assert_allclose

# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#   - SUPERVISED_METRICS: all supervised cluster metrics (when given a
#     ground truth value)
#   - UNSUPERVISED_METRICS: all unsupervised cluster metrics
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance towards several input layouts.
#

SUPERVISED_METRICS = {
    "adjusted_mutual_info_score": adjusted_mutual_info_score,
    "adjusted_rand_score": adjusted_rand_score,
    "rand_score": rand_score,
    "completeness_score": completeness_score,
    "homogeneity_score": homogeneity_score,
    "mutual_info_score": mutual_info_score,
    "normalized_mutual_info_score": normalized_mutual_info_score,
    "v_measure_score": v_measure_score,
    "fowlkes_mallows_score": fowlkes_mallows_score,
}

UNSUPERVISED_METRICS = {
    "silhouette_score": silhouette_score,
    "silhouette_manhattan": partial(silhouette_score, metric="manhattan"),
    "calinski_harabasz_score": calinski_harabasz_score,
    "davies_bouldin_score": davies_bouldin_score,
}

# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics
# that are symmetric with respect to their input arguments y_true and y_pred.
#
# --------------------------------------------------------------------
# Symmetric with respect to their input arguments y_true and y_pred.
# Symmetric metrics only apply to supervised clusters.
SYMMETRIC_METRICS = [
    "adjusted_rand_score",
    "rand_score",
    "v_measure_score",
    "mutual_info_score",
    "adjusted_mutual_info_score",
    "normalized_mutual_info_score",
    "fowlkes_mallows_score",
]

NON_SYMMETRIC_METRICS = ["homogeneity_score", "completeness_score"]
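# homogeneity and completeness are mirror images of each other: swapping
# y_true and y_pred maps one onto the other, so neither is symmetric on its own.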

# Metrics whose upper bound is 1
NORMALIZED_METRICS = [
    "adjusted_rand_score",
    "rand_score",
    "homogeneity_score",
    "completeness_score",
    "v_measure_score",
    "adjusted_mutual_info_score",
    "fowlkes_mallows_score",
    "normalized_mutual_info_score",
]


rng = np.random.RandomState(0)
y1 = rng.randint(3, size=30)
y2 = rng.randint(3, size=30)


def test_symmetric_non_symmetric_union():
    assert sorted(SYMMETRIC_METRICS + NON_SYMMETRIC_METRICS) == sorted(
        SUPERVISED_METRICS
    )


# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
    "metric_name, y1, y2", [(name, y1, y2) for name in SYMMETRIC_METRICS]
)
def test_symmetry(metric_name, y1, y2):
    metric = SUPERVISED_METRICS[metric_name]
    assert metric(y1, y2) == pytest.approx(metric(y2, y1))


@pytest.mark.parametrize(
    "metric_name, y1, y2", [(name, y1, y2) for name in NON_SYMMETRIC_METRICS]
)
def test_non_symmetry(metric_name, y1, y2):
    metric = SUPERVISED_METRICS[metric_name]
    assert metric(y1, y2) != pytest.approx(metric(y2, y1))


# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric_name", NORMALIZED_METRICS)
def test_normalized_output(metric_name):
    upper_bound_1 = [0, 0, 0, 1, 1, 1]
    upper_bound_2 = [0, 0, 0, 1, 1, 1]
    metric = SUPERVISED_METRICS[metric_name]
    assert metric([0, 0, 0, 1, 1], [0, 0, 0, 1, 2]) > 0.0
    assert metric([0, 0, 1, 1, 2], [0, 0, 1, 1, 1]) > 0.0
    assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0
    assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0
    assert metric(upper_bound_1, upper_bound_2) == pytest.approx(1.0)

    lower_bound_1 = [0, 0, 0, 0, 0, 0]
    lower_bound_2 = [0, 1, 2, 3, 4, 5]
    score = np.array(
        [metric(lower_bound_1, lower_bound_2), metric(lower_bound_2, lower_bound_1)]
    )
    assert not (score < 0).any()


# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS))
def test_permute_labels(metric_name):
    # No clustering metric should change its score under a permutation of the
    # label values, i.e. when 0 and 1 are exchanged.
    y_label = np.array([0, 0, 0, 1, 1, 0, 1])
    y_pred = np.array([1, 0, 1, 0, 1, 1, 0])
    if metric_name in SUPERVISED_METRICS:
        metric = SUPERVISED_METRICS[metric_name]
        score_1 = metric(y_pred, y_label)
        assert_allclose(score_1, metric(1 - y_pred, y_label))
        assert_allclose(score_1, metric(1 - y_pred, 1 - y_label))
        assert_allclose(score_1, metric(y_pred, 1 - y_label))
    else:
        metric = UNSUPERVISED_METRICS[metric_name]
        X = np.random.randint(10, size=(7, 10))
        score_1 = metric(X, y_pred)
        assert_allclose(score_1, metric(X, 1 - y_pred))


# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS))
# For all clustering metrics, the input parameters can be arrays or lists and
# hold positive, negative or string labels.
def test_format_invariance(metric_name):
    y_true = [0, 0, 0, 0, 1, 1, 1, 1]
    y_pred = [0, 1, 2, 3, 4, 5, 6, 7]

    def generate_formats(y):
        y = np.array(y)
        yield y, "array of ints"
        yield y.tolist(), "list of ints"
        yield [str(x) + "-a" for x in y.tolist()], "list of strs"
        yield (
            np.array([str(x) + "-a" for x in y.tolist()], dtype=object),
            "array of strs",
        )
        yield y - 1, "including negative ints"
        yield y + 1, "strictly positive ints"

    if metric_name in SUPERVISED_METRICS:
        metric = SUPERVISED_METRICS[metric_name]
        score_1 = metric(y_true, y_pred)
        y_true_gen = generate_formats(y_true)
        y_pred_gen = generate_formats(y_pred)
        for (y_true_fmt, fmt_name), (y_pred_fmt, _) in zip(y_true_gen, y_pred_gen):
            assert score_1 == metric(y_true_fmt, y_pred_fmt)
    else:
        metric = UNSUPERVISED_METRICS[metric_name]
        X = np.random.randint(10, size=(8, 10))
        score_1 = metric(X, y_true)
        assert score_1 == metric(X.astype(float), y_true)
        y_true_gen = generate_formats(y_true)
        for y_true_fmt, fmt_name in y_true_gen:
            assert score_1 == metric(X, y_true_fmt)


@pytest.mark.parametrize("metric", SUPERVISED_METRICS.values())
def test_single_sample(metric):
    # only the supervised metrics support single sample
    for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        metric([i], [j])


@pytest.mark.parametrize(
    "metric_name, metric_func", dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS).items()
)
def test_inf_nan_input(metric_name, metric_func):
    if metric_name in SUPERVISED_METRICS:
        invalids = [
            ([0, 1], [np.inf, np.inf]),
            ([0, 1], [np.nan, np.nan]),
            ([0, 1], [np.nan, np.inf]),
        ]
    else:
        X = np.random.randint(10, size=(2, 10))
        invalids = [(X, [np.inf, np.inf]), (X, [np.nan, np.nan]), (X, [np.nan, np.inf])]
    with pytest.raises(ValueError, match=r"contains (NaN|infinity)"):
        for args in invalids:
            metric_func(*args)
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_supervised.py
ADDED
@@ -0,0 +1,482 @@
import warnings

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal

from sklearn.metrics.cluster import (
    adjusted_mutual_info_score,
    adjusted_rand_score,
    completeness_score,
    contingency_matrix,
    entropy,
    expected_mutual_information,
    fowlkes_mallows_score,
    homogeneity_completeness_v_measure,
    homogeneity_score,
    mutual_info_score,
    normalized_mutual_info_score,
    pair_confusion_matrix,
    rand_score,
    v_measure_score,
)
from sklearn.metrics.cluster._supervised import _generalized_average, check_clusterings
from sklearn.utils import assert_all_finite
from sklearn.utils._testing import assert_almost_equal

score_funcs = [
    adjusted_rand_score,
    rand_score,
    homogeneity_score,
    completeness_score,
    v_measure_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
]


def test_error_messages_on_wrong_input():
    for score_func in score_funcs:
        expected = (
            r"Found input variables with inconsistent numbers " r"of samples: \[2, 3\]"
        )
        with pytest.raises(ValueError, match=expected):
            score_func([0, 1], [1, 1, 1])

        expected = r"labels_true must be 1D: shape is \(2"
        with pytest.raises(ValueError, match=expected):
            score_func([[0, 1], [1, 0]], [1, 1, 1])

        expected = r"labels_pred must be 1D: shape is \(2"
        with pytest.raises(ValueError, match=expected):
            score_func([0, 1, 0], [[1, 1], [0, 0]])


def test_generalized_average():
    a, b = 1, 2
    methods = ["min", "geometric", "arithmetic", "max"]
    means = [_generalized_average(a, b, method) for method in methods]
    assert means[0] <= means[1] <= means[2] <= means[3]
    c, d = 12, 12
    means = [_generalized_average(c, d, method) for method in methods]
    assert means[0] == means[1] == means[2] == means[3]

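Aside (not part of the vendored file above): a plain-Python instance of the ordering
asserted in test_generalized_average, computed from the standard definitions of the
four means; equality holds exactly when the two arguments coincide.

import math

a, b = 1, 2
means = [min(a, b), math.sqrt(a * b), (a + b) / 2, max(a, b)]
assert means == sorted(means)  # 1 <= 1.414... <= 1.5 <= 2
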
def test_perfect_matches():
    for score_func in score_funcs:
        assert score_func([], []) == pytest.approx(1.0)
        assert score_func([0], [1]) == pytest.approx(1.0)
        assert score_func([0, 0, 0], [0, 0, 0]) == pytest.approx(1.0)
        assert score_func([0, 1, 0], [42, 7, 42]) == pytest.approx(1.0)
        assert score_func([0.0, 1.0, 0.0], [42.0, 7.0, 42.0]) == pytest.approx(1.0)
        assert score_func([0.0, 1.0, 2.0], [42.0, 7.0, 2.0]) == pytest.approx(1.0)
        assert score_func([0, 1, 2], [42, 7, 2]) == pytest.approx(1.0)
    score_funcs_with_changing_means = [
        normalized_mutual_info_score,
        adjusted_mutual_info_score,
    ]
    means = {"min", "geometric", "arithmetic", "max"}
    for score_func in score_funcs_with_changing_means:
        for mean in means:
            assert score_func([], [], average_method=mean) == pytest.approx(1.0)
            assert score_func([0], [1], average_method=mean) == pytest.approx(1.0)
            assert score_func(
                [0, 0, 0], [0, 0, 0], average_method=mean
            ) == pytest.approx(1.0)
            assert score_func(
                [0, 1, 0], [42, 7, 42], average_method=mean
            ) == pytest.approx(1.0)
            assert score_func(
                [0.0, 1.0, 0.0], [42.0, 7.0, 42.0], average_method=mean
            ) == pytest.approx(1.0)
            assert score_func(
                [0.0, 1.0, 2.0], [42.0, 7.0, 2.0], average_method=mean
            ) == pytest.approx(1.0)
            assert score_func(
                [0, 1, 2], [42, 7, 2], average_method=mean
            ) == pytest.approx(1.0)


def test_homogeneous_but_not_complete_labeling():
    # homogeneous but not complete clustering
    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 2, 2])
    assert_almost_equal(h, 1.00, 2)
    assert_almost_equal(c, 0.69, 2)
    assert_almost_equal(v, 0.81, 2)


def test_complete_but_not_homogeneous_labeling():
    # complete but not homogeneous clustering
    h, c, v = homogeneity_completeness_v_measure([0, 0, 1, 1, 2, 2], [0, 0, 1, 1, 1, 1])
    assert_almost_equal(h, 0.58, 2)
    assert_almost_equal(c, 1.00, 2)
    assert_almost_equal(v, 0.73, 2)


def test_not_complete_and_not_homogeneous_labeling():
    # neither complete nor homogeneous but not so bad either
    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)


def test_beta_parameter():
    # test for when beta is passed to
    # homogeneity_completeness_v_measure
    # and v_measure_score
    beta_test = 0.2
    h_test = 0.67
    c_test = 0.42
    v_test = (1 + beta_test) * h_test * c_test / (beta_test * h_test + c_test)

    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2], beta=beta_test
    )
    assert_almost_equal(h, h_test, 2)
    assert_almost_equal(c, c_test, 2)
    assert_almost_equal(v, v_test, 2)

    v = v_measure_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2], beta=beta_test)
    assert_almost_equal(v, v_test, 2)

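Aside (not part of the vendored file above): the weighted V-measure used in
test_beta_parameter is V_beta = (1 + beta) * h * c / (beta * h + c). A quick
plain-Python check of the test's own constants:

beta, h, c = 0.2, 0.67, 0.42
v = (1 + beta) * h * c / (beta * h + c)
print(round(v, 4))  # 0.6095 -- with beta < 1, homogeneity is weighted more heavily
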
def test_non_consecutive_labels():
    # regression tests for labels with gaps
    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 2, 2, 2], [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)

    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)

    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)

    ri_1 = rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ri_2 = rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ri_1, 0.66, 2)
    assert_almost_equal(ri_2, 0.66, 2)


def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10, seed=42):
    # Compute score for random uniform cluster labelings
    random_labels = np.random.RandomState(seed).randint
    scores = np.zeros((len(k_range), n_runs))
    for i, k in enumerate(k_range):
        for j in range(n_runs):
            labels_a = random_labels(low=0, high=k, size=n_samples)
            labels_b = random_labels(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores


def test_adjustment_for_chance():
    # Check that adjusted scores are almost zero on random labels
    n_clusters_range = [2, 10, 50, 90]
    n_samples = 100
    n_runs = 10

    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples, n_clusters_range, n_runs
    )

    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)


def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided sparse contingency
    C = contingency_matrix(labels_a, labels_b, sparse=True)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided dense contingency
    C = contingency_matrix(labels_a, labels_b)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information
    n_samples = C.sum()
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27821, 5)
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert ami == pytest.approx(1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    assert_almost_equal(ami, 0.38, 2)

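Aside (not part of the vendored file above): the 0.27821 asserted in
test_adjusted_mutual_info_score follows from the AMI definition
AMI = (MI - E[MI]) / (mean(H(a), H(b)) - E[MI]), using the default arithmetic mean.
A sketch reproducing it from the test's own constants; the entropy values (in nats)
are what scikit-learn's entropy helper returns for the same labels_a / labels_b.

mi, emi = 0.41022, 0.15042
h_a, h_b = 1.0951, 1.0734  # entropy(labels_a), entropy(labels_b)
ami = (mi - emi) / ((h_a + h_b) / 2 - emi)
print(round(ami, 4))  # ~0.2782
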
def test_expected_mutual_info_overflow():
    # Test for a regression where a contingency cell exceeding 2**16
    # led to overflow in np.outer, resulting in EMI > 1
    assert expected_mutual_information(np.array([[70000]]), 70000) <= 1


def test_int_overflow_mutual_info_fowlkes_mallows_score():
    # Test overflow in mutual_info_score and fowlkes_mallows_score
    x = np.array(
        [1] * (52632 + 2529)
        + [2] * (14660 + 793)
        + [3] * (3271 + 204)
        + [4] * (814 + 39)
        + [5] * (316 + 20)
    )
    y = np.array(
        [0] * 52632
        + [1] * 2529
        + [0] * 14660
        + [1] * 793
        + [0] * 3271
        + [1] * 204
        + [0] * 814
        + [1] * 39
        + [0] * 316
        + [1] * 20
    )

    assert_all_finite(mutual_info_score(x, y))
    assert_all_finite(fowlkes_mallows_score(x, y))


def test_entropy():
    ent = entropy([0, 0, 42.0])
    assert_almost_equal(ent, 0.6365141, 5)
    assert_almost_equal(entropy([]), 1)
    assert entropy([1, 1, 1, 1]) == 0


def test_contingency_matrix():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    C2 = np.histogram2d(labels_a, labels_b, bins=(np.arange(1, 5), np.arange(1, 5)))[0]
    assert_array_almost_equal(C, C2)
    C = contingency_matrix(labels_a, labels_b, eps=0.1)
    assert_array_almost_equal(C, C2 + 0.1)


def test_contingency_matrix_sparse():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray()
    assert_array_almost_equal(C, C_sparse)
    with pytest.raises(ValueError, match="Cannot set 'eps' when sparse=True"):
        contingency_matrix(labels_a, labels_b, eps=1e-10, sparse=True)


def test_exactly_zero_info_score():
    # Check numerical stability when information is exactly zero
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a, labels_b = (np.ones(i, dtype=int), np.arange(i, dtype=int))
        assert normalized_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0)
        assert v_measure_score(labels_a, labels_b) == pytest.approx(0.0)
        assert adjusted_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0)
        assert normalized_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0)
        for method in ["min", "geometric", "arithmetic", "max"]:
            assert adjusted_mutual_info_score(
                labels_a, labels_b, average_method=method
            ) == pytest.approx(0.0)
            assert normalized_mutual_info_score(
                labels_a, labels_b, average_method=method
            ) == pytest.approx(0.0)


def test_v_measure_and_mutual_information(seed=36):
    # Check the relation between v_measure, entropy and mutual information
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = (
            random_state.randint(0, 10, i),
            random_state.randint(0, 10, i),
        )
        assert_almost_equal(
            v_measure_score(labels_a, labels_b),
            2.0
            * mutual_info_score(labels_a, labels_b)
            / (entropy(labels_a) + entropy(labels_b)),
            0,
        )
        avg = "arithmetic"
        assert_almost_equal(
            v_measure_score(labels_a, labels_b),
            normalized_mutual_info_score(labels_a, labels_b, average_method=avg),
        )


def test_fowlkes_mallows_score():
    # General case
    score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2])
    assert_almost_equal(score, 4.0 / np.sqrt(12.0 * 6.0))

    # Perfect match but where the label names changed
    perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0])
    assert_almost_equal(perfect_score, 1.0)

    # Worst case
    worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0], [0, 1, 2, 3, 4, 5])
    assert_almost_equal(worst_score, 0.0)


def test_fowlkes_mallows_score_properties():
    # handcrafted example
    labels_a = np.array([0, 0, 0, 1, 1, 2])
    labels_b = np.array([1, 1, 2, 2, 0, 0])
    expected = 1.0 / np.sqrt((1.0 + 3.0) * (1.0 + 2.0))
    # FMI = TP / sqrt((TP + FP) * (TP + FN))

    score_original = fowlkes_mallows_score(labels_a, labels_b)
    assert_almost_equal(score_original, expected)

    # symmetric property
    score_symmetric = fowlkes_mallows_score(labels_b, labels_a)
    assert_almost_equal(score_symmetric, expected)

    # permutation property
    score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b)
    assert_almost_equal(score_permuted, expected)

    # symmetric and permutation (both together)
    score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3)
    assert_almost_equal(score_both, expected)


@pytest.mark.parametrize(
    "labels_true, labels_pred",
    [
        (["a"] * 6, [1, 1, 0, 0, 1, 1]),
        ([1] * 6, [1, 1, 0, 0, 1, 1]),
        ([1, 1, 0, 0, 1, 1], ["a"] * 6),
        ([1, 1, 0, 0, 1, 1], [1] * 6),
        (["a"] * 6, ["a"] * 6),
    ],
)
def test_mutual_info_score_positive_constant_label(labels_true, labels_pred):
    # Check that MI = 0 when one or both labellings are constant
    # non-regression test for #16355
    assert mutual_info_score(labels_true, labels_pred) == 0


def test_check_clustering_error():
    # Test warning message for continuous values
    rng = np.random.RandomState(42)
    noise = rng.rand(500)
    wavelength = np.linspace(0.01, 1, 500) * 1e-6
    msg = (
        "Clustering metrics expects discrete values but received "
        "continuous values for label, and continuous values for "
        "target"
    )

    with pytest.warns(UserWarning, match=msg):
        check_clusterings(wavelength, noise)


def test_pair_confusion_matrix_fully_dispersed():
    # edge case: every element is its own cluster
    N = 100
    clustering1 = list(range(N))
    clustering2 = clustering1
    expected = np.array([[N * (N - 1), 0], [0, 0]])
    assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected)


def test_pair_confusion_matrix_single_cluster():
    # edge case: only one cluster
    N = 100
    clustering1 = np.zeros((N,))
    clustering2 = clustering1
    expected = np.array([[0, 0], [0, N * (N - 1)]])
    assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected)


def test_pair_confusion_matrix():
    # regular case: different non-trivial clusterings
    n = 10
    N = n**2
    clustering1 = np.hstack([[i + 1] * n for i in range(n)])
    clustering2 = np.hstack([[i + 1] * (n + 1) for i in range(n)])[:N]
    # basic quadratic implementation
    expected = np.zeros(shape=(2, 2), dtype=np.int64)
    for i in range(len(clustering1)):
        for j in range(len(clustering2)):
            if i != j:
                same_cluster_1 = int(clustering1[i] == clustering1[j])
                same_cluster_2 = int(clustering2[i] == clustering2[j])
                expected[same_cluster_1, same_cluster_2] += 1
    assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected)

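Aside (not part of the vendored file above): the pair confusion matrix C counts every
ordered pair of samples by whether the two clusterings agree on it, so the Rand index
is recoverable as (C[0, 0] + C[1, 1]) / C.sum(). A sketch, assuming scikit-learn is
installed:

from sklearn.metrics import rand_score
from sklearn.metrics.cluster import pair_confusion_matrix

a = [0, 0, 0, 1, 1, 1]
b = [0, 1, 0, 1, 2, 2]
C = pair_confusion_matrix(a, b)
# Agreeing pairs (both separated or both together) over all ordered pairs.
assert (C[0, 0] + C[1, 1]) / C.sum() == rand_score(a, b)
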
@pytest.mark.parametrize(
    "clustering1, clustering2",
    [(list(range(100)), list(range(100))), (np.zeros((100,)), np.zeros((100,)))],
)
def test_rand_score_edge_cases(clustering1, clustering2):
    # edge case 1: every element is its own cluster
    # edge case 2: only one cluster
    assert_allclose(rand_score(clustering1, clustering2), 1.0)


def test_rand_score():
    # regular case: different non-trivial clusterings
    clustering1 = [0, 0, 0, 1, 1, 1]
    clustering2 = [0, 1, 0, 1, 2, 2]
    # pair confusion matrix
    D11 = 2 * 2  # ordered pairs (1, 3), (5, 6)
    D10 = 2 * 4  # ordered pairs (1, 2), (2, 3), (4, 5), (4, 6)
    D01 = 2 * 1  # ordered pair (2, 4)
    D00 = 5 * 6 - D11 - D01 - D10  # the remaining pairs
    # rand score
    expected_numerator = D00 + D11
    expected_denominator = D00 + D01 + D10 + D11
    expected = expected_numerator / expected_denominator
    assert_allclose(rand_score(clustering1, clustering2), expected)

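Aside (not part of the vendored file above): plugging the counts from test_rand_score
into the formula gives D11 = 4, D10 = 8, D01 = 2, D00 = 30 - 14 = 16, so the Rand
index is (16 + 4) / 30 = 2/3 ≈ 0.667, consistent with the 0.66 asserted for the same
pair of clusterings in test_non_consecutive_labels earlier in this file.
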
def test_adjusted_rand_score_overflow():
    """Check that a large amount of data will not lead to overflow in
    `adjusted_rand_score`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/20305
    """
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, 100_000, dtype=np.int8)
    y_pred = rng.randint(0, 2, 100_000, dtype=np.int8)
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        adjusted_rand_score(y_true, y_pred)


@pytest.mark.parametrize("average_method", ["min", "arithmetic", "geometric", "max"])
def test_normalized_mutual_info_score_bounded(average_method):
    """Check that NMI returns a score between 0 (included) and 1 (excluded
    for a non-perfect match).

    Non-regression test for issue #13836.
    """
    labels1 = [0] * 469
    labels2 = [1] + labels1[1:]
    labels3 = [0, 1] + labels1[2:]

    # labels1 is constant. The mutual info between labels1 and any other labelling is 0.
    nmi = normalized_mutual_info_score(labels1, labels2, average_method=average_method)
    assert nmi == 0

    # non-constant, non-perfectly matching labels
    nmi = normalized_mutual_info_score(labels2, labels3, average_method=average_method)
    assert 0 <= nmi < 1
venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_unsupervised.py
ADDED
@@ -0,0 +1,413 @@
import warnings

import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.sparse import issparse

from sklearn import datasets
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster import (
    calinski_harabasz_score,
    davies_bouldin_score,
    silhouette_samples,
    silhouette_score,
)
from sklearn.metrics.cluster._unsupervised import _silhouette_reduce
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import (
    CSC_CONTAINERS,
    CSR_CONTAINERS,
    DOK_CONTAINERS,
    LIL_CONTAINERS,
)


@pytest.mark.parametrize(
    "sparse_container",
    [None] + CSR_CONTAINERS + CSC_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS,
)
@pytest.mark.parametrize("sample_size", [None, "half"])
def test_silhouette(sparse_container, sample_size):
    # Tests the Silhouette Coefficient.
    dataset = datasets.load_iris()
    X, y = dataset.data, dataset.target
    if sparse_container is not None:
        X = sparse_container(X)
    sample_size = int(X.shape[0] / 2) if sample_size == "half" else sample_size

    D = pairwise_distances(X, metric="euclidean")
    # Given that the actual labels are used, we can assume that S would be positive.
    score_precomputed = silhouette_score(
        D, y, metric="precomputed", sample_size=sample_size, random_state=0
    )
    score_euclidean = silhouette_score(
        X, y, metric="euclidean", sample_size=sample_size, random_state=0
    )
    assert score_precomputed > 0
    assert score_euclidean > 0
    assert score_precomputed == pytest.approx(score_euclidean)


def test_cluster_size_1():
    # Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster
    # (cluster 0). We also test the case where there are identical samples
    # as the only members of a cluster (cluster 2). To our knowledge, this case
    # is not discussed in reference material, and we choose for it a sample
    # score of 1.
    X = [[0.0], [1.0], [1.0], [2.0], [3.0], [3.0]]
    labels = np.array([0, 1, 1, 1, 2, 2])

    # Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention
    # Cluster 1: intra-cluster = [.5, .5, 1]
    #            inter-cluster = [1, 1, 1]
    #            silhouette    = [.5, .5, 0]
    # Cluster 2: intra-cluster = [0, 0]
    #            inter-cluster = [arbitrary, arbitrary]
    #            silhouette    = [1., 1.]

    silhouette = silhouette_score(X, labels)
    assert not np.isnan(silhouette)
    ss = silhouette_samples(X, labels)
    assert_array_equal(ss, [0, 0.5, 0.5, 0, 1, 1])

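Aside (not part of the vendored file above): a plain-numpy sketch of the per-sample
silhouette value s(i) = (b(i) - a(i)) / max(a(i), b(i)) used above, worked out for
the second sample of test_cluster_size_1 (x = 1.0, in cluster 1 = {1.0, 1.0, 2.0});
a is the mean intra-cluster distance, b the mean distance to the nearest other cluster.

import numpy as np

x = 1.0
a = np.mean([abs(x - 1.0), abs(x - 2.0)])       # mean intra-cluster distance = 0.5
b = min(np.mean([abs(x - 0.0)]),                # cluster 0 = {0.0}      -> 1.0
        np.mean([abs(x - 3.0), abs(x - 3.0)]))  # cluster 2 = {3.0, 3.0} -> 2.0
s = (b - a) / max(a, b)
print(s)  # 0.5, matching ss[1] in test_cluster_size_1
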
def test_silhouette_paper_example():
    # Explicitly check per-sample results against Rousseeuw (1987)
    # Data from Table 1
    lower = [
        5.58, 7.00, 6.50, 7.08, 7.00, 3.83, 4.83, 5.08, 8.17, 5.83, 2.17,
        5.75, 6.67, 6.92, 4.92, 6.42, 5.00, 5.58, 6.00, 4.67, 6.42, 3.42,
        5.50, 6.42, 6.42, 5.00, 3.92, 6.17, 2.50, 4.92, 6.25, 7.33, 4.50,
        2.25, 6.33, 2.75, 6.08, 6.67, 4.25, 2.67, 6.00, 6.17, 6.17, 6.92,
        6.17, 5.25, 6.83, 4.50, 3.75, 5.75, 5.42, 6.08, 5.83, 6.67, 3.67,
        4.75, 3.00, 6.08, 6.67, 5.00, 5.58, 4.83, 6.17, 5.67, 6.50, 6.92,
    ]
    D = np.zeros((12, 12))
    D[np.tril_indices(12, -1)] = lower
    D += D.T

    names = [
        "BEL", "BRA", "CHI", "CUB", "EGY", "FRA",
        "IND", "ISR", "USA", "USS", "YUG", "ZAI",
    ]

    # Data from Figure 2
    labels1 = [1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1]
    expected1 = {
        "USA": 0.43, "BEL": 0.39, "FRA": 0.35, "ISR": 0.30, "BRA": 0.22,
        "EGY": 0.20, "ZAI": 0.19, "CUB": 0.40, "USS": 0.34, "CHI": 0.33,
        "YUG": 0.26, "IND": -0.04,
    }
    score1 = 0.28

    # Data from Figure 3
    labels2 = [1, 2, 3, 3, 1, 1, 2, 1, 1, 3, 3, 2]
    expected2 = {
        "USA": 0.47, "FRA": 0.44, "BEL": 0.42, "ISR": 0.37, "EGY": 0.02,
        "ZAI": 0.28, "BRA": 0.25, "IND": 0.17, "CUB": 0.48, "USS": 0.44,
        "YUG": 0.31, "CHI": 0.31,
    }
    score2 = 0.33

    for labels, expected, score in [
        (labels1, expected1, score1),
        (labels2, expected2, score2),
    ]:
        expected = [expected[name] for name in names]
        # we check to 2 decimal places because that's what's in the paper
        pytest.approx(
            expected,
            silhouette_samples(D, np.array(labels), metric="precomputed"),
            abs=1e-2,
        )
        pytest.approx(
            score, silhouette_score(D, np.array(labels), metric="precomputed"), abs=1e-2
        )


def test_correct_labelsize():
    # Assert 1 < n_labels < n_samples
    dataset = datasets.load_iris()
    X = dataset.data

    # n_labels = n_samples
    y = np.arange(X.shape[0])
    err_msg = (
        r"Number of labels is %d\. Valid values are 2 "
        r"to n_samples - 1 \(inclusive\)" % len(np.unique(y))
    )
    with pytest.raises(ValueError, match=err_msg):
        silhouette_score(X, y)

    # n_labels = 1
    y = np.zeros(X.shape[0])
    err_msg = (
        r"Number of labels is %d\. Valid values are 2 "
        r"to n_samples - 1 \(inclusive\)" % len(np.unique(y))
    )
    with pytest.raises(ValueError, match=err_msg):
        silhouette_score(X, y)


def test_non_encoded_labels():
    dataset = datasets.load_iris()
    X = dataset.data
    labels = dataset.target
    assert silhouette_score(X, labels * 2 + 10) == silhouette_score(X, labels)
    assert_array_equal(
        silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels)
    )


def test_non_numpy_labels():
    dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    assert silhouette_score(list(X), list(y)) == silhouette_score(X, y)


@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_silhouette_nonzero_diag(dtype):
    # Make sure silhouette_samples requires the diagonal to be zero.
    # Non-regression test for #12178

    # Construct a zero-diagonal matrix
    dists = pairwise_distances(
        np.array([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype).T
    )
    labels = [0, 0, 0, 1, 1, 1]

    # small values on the diagonal are OK
    dists[2][2] = np.finfo(dists.dtype).eps * 10
    silhouette_samples(dists, labels, metric="precomputed")

    # values bigger than eps * 100 are not
    dists[2][2] = np.finfo(dists.dtype).eps * 1000
    with pytest.raises(ValueError, match="contains non-zero"):
        silhouette_samples(dists, labels, metric="precomputed")


@pytest.mark.parametrize(
    "sparse_container",
    CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS,
)
def test_silhouette_samples_precomputed_sparse(sparse_container):
    """Check that silhouette_samples works correctly for sparse precomputed input."""
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    assert issparse(pdist_sparse)
    output_with_sparse_input = silhouette_samples(pdist_sparse, y, metric="precomputed")
    output_with_dense_input = silhouette_samples(pdist_dense, y, metric="precomputed")
    assert_allclose(output_with_sparse_input, output_with_dense_input)


@pytest.mark.parametrize(
    "sparse_container",
    CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS,
)
def test_silhouette_samples_euclidean_sparse(sparse_container):
    """Check that silhouette_samples works correctly for sparse euclidean input."""
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    assert issparse(pdist_sparse)
    output_with_sparse_input = silhouette_samples(pdist_sparse, y)
    output_with_dense_input = silhouette_samples(pdist_dense, y)
    assert_allclose(output_with_sparse_input, output_with_dense_input)


@pytest.mark.parametrize(
    "sparse_container", CSC_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS
)
def test_silhouette_reduce(sparse_container):
    """Check non-CSR input to the private method `_silhouette_reduce`."""
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    label_freqs = np.bincount(y)
    with pytest.raises(
        TypeError,
        match="Expected CSR matrix. Please pass sparse matrix in CSR format.",
    ):
        _silhouette_reduce(pdist_sparse, start=0, labels=y, label_freqs=label_freqs)


def assert_raises_on_only_one_label(func):
    """Assert the message when there is only one label."""
    rng = np.random.RandomState(seed=0)
    with pytest.raises(ValueError, match="Number of labels is"):
        func(rng.rand(10, 2), np.zeros(10))


def assert_raises_on_all_points_same_cluster(func):
    """Assert the message when every point is in its own cluster."""
    rng = np.random.RandomState(seed=0)
    with pytest.raises(ValueError, match="Number of labels is"):
        func(rng.rand(10, 2), np.arange(10))


def test_calinski_harabasz_score():
    assert_raises_on_only_one_label(calinski_harabasz_score)

    assert_raises_on_all_points_same_cluster(calinski_harabasz_score)

    # Assert the value is 1. when all samples are equal
    assert 1.0 == calinski_harabasz_score(np.ones((10, 2)), [0] * 5 + [1] * 5)

    # Assert the value is 0. when all the cluster means are equal
    assert 0.0 == calinski_harabasz_score([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)

    # General case (with non-numpy arrays)
    X = (
        [[0, 0], [1, 1]] * 5
        + [[3, 3], [4, 4]] * 5
        + [[0, 4], [1, 3]] * 5
        + [[3, 1], [4, 0]] * 5
    )
    labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    pytest.approx(calinski_harabasz_score(X, labels), 45 * (40 - 4) / (5 * (4 - 1)))

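Aside (not part of the vendored file above): the expected value in
test_calinski_harabasz_score encodes the score's definition
CH = (tr(B) / (k - 1)) / (tr(W) / (n - k)), where tr(B) and tr(W) are the
between-cluster and within-cluster dispersions. Reading the test's expression with
n = 40 samples and k = 4 clusters gives tr(B) = 45 and tr(W) = 5, hence
45 * (40 - 4) / (5 * (4 - 1)) = 1620 / 15 = 108.
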
def test_davies_bouldin_score():
    assert_raises_on_only_one_label(davies_bouldin_score)
    assert_raises_on_all_points_same_cluster(davies_bouldin_score)

    # Assert the value is 0. when all samples are equal
    assert davies_bouldin_score(np.ones((10, 2)), [0] * 5 + [1] * 5) == pytest.approx(
        0.0
    )

    # Assert the value is 0. when all the cluster means are equal
    assert davies_bouldin_score(
        [[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10
    ) == pytest.approx(0.0)

    # General case (with non-numpy arrays)
    X = (
        [[0, 0], [1, 1]] * 5
        + [[3, 3], [4, 4]] * 5
        + [[0, 4], [1, 3]] * 5
        + [[3, 1], [4, 0]] * 5
    )
    labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    pytest.approx(davies_bouldin_score(X, labels), 2 * np.sqrt(0.5) / 3)

    # Ensure a divide-by-zero warning is not raised in the general case
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        davies_bouldin_score(X, labels)

    # General case - a cluster has one sample
    X = [[0, 0], [2, 2], [3, 3], [5, 5]]
    labels = [0, 0, 1, 2]
    pytest.approx(davies_bouldin_score(X, labels), (5.0 / 4) / 3)


def test_silhouette_score_integer_precomputed():
    """Check that silhouette_score works for precomputed metrics that are integers.

    Non-regression test for #22107.
    """
    result = silhouette_score(
        [[0, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed"
    )
    assert result == pytest.approx(1 / 6)

    # a non-zero value on the diagonal for integer input raises an error
    with pytest.raises(ValueError, match="contains non-zero"):
        silhouette_score(
            [[1, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed"
        )
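Aside (not part of the vendored file above): the 1/6 expected in
test_silhouette_score_integer_precomputed can be checked by hand. With distances
d(0, 1) = 1, d(0, 2) = 2, d(1, 2) = 1 and labels [0, 0, 1]: sample 0 has a = 1,
b = 2, so s = (2 - 1) / 2 = 0.5; sample 1 has a = 1, b = 1, so s = 0; sample 2 is a
singleton cluster, so s = 0 by the convention tested in test_cluster_size_1. The mean
silhouette is therefore 0.5 / 3 = 1/6.
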
venv/lib/python3.10/site-packages/sklearn/metrics/tests/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes).
venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_classification.cpython-310.pyc
ADDED
Binary file (68.9 kB).
venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_common.cpython-310.pyc
ADDED
Binary file (33.2 kB).
venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_dist_metrics.cpython-310.pyc
ADDED
Binary file (10.7 kB).
venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise.cpython-310.pyc
ADDED
Binary file (39.4 kB).