Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__init__.py +162 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_arff_parser.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_california_housing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_covtype.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_kddcup99.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_openml.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_rcv1.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_species_distributions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_svmlight_format_io.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_twenty_newsgroups.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py +542 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_base.py +1441 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py +223 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_covtype.py +236 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py +401 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_lfw.py +570 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py +156 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_openml.py +1158 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py +306 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py +2284 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py +273 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_fast.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_io.py +584 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py +561 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/breast_cancer.rst +122 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/california_housing.rst +46 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/covtype.rst +30 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/iris.rst +67 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/kddcup99.rst +94 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/lfw.rst +128 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/rcv1.rst +72 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/twenty_newsgroups.rst +264 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__init__.py
ADDED
@@ -0,0 +1,162 @@
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
import textwrap

from ._base import (
    clear_data_home,
    get_data_home,
    load_breast_cancer,
    load_diabetes,
    load_digits,
    load_files,
    load_iris,
    load_linnerud,
    load_sample_image,
    load_sample_images,
    load_wine,
)
from ._california_housing import fetch_california_housing
from ._covtype import fetch_covtype
from ._kddcup99 import fetch_kddcup99
from ._lfw import fetch_lfw_pairs, fetch_lfw_people
from ._olivetti_faces import fetch_olivetti_faces
from ._openml import fetch_openml
from ._rcv1 import fetch_rcv1
from ._samples_generator import (
    make_biclusters,
    make_blobs,
    make_checkerboard,
    make_circles,
    make_classification,
    make_friedman1,
    make_friedman2,
    make_friedman3,
    make_gaussian_quantiles,
    make_hastie_10_2,
    make_low_rank_matrix,
    make_moons,
    make_multilabel_classification,
    make_regression,
    make_s_curve,
    make_sparse_coded_signal,
    make_sparse_spd_matrix,
    make_sparse_uncorrelated,
    make_spd_matrix,
    make_swiss_roll,
)
from ._species_distributions import fetch_species_distributions
from ._svmlight_format_io import (
    dump_svmlight_file,
    load_svmlight_file,
    load_svmlight_files,
)
from ._twenty_newsgroups import fetch_20newsgroups, fetch_20newsgroups_vectorized

__all__ = [
    "clear_data_home",
    "dump_svmlight_file",
    "fetch_20newsgroups",
    "fetch_20newsgroups_vectorized",
    "fetch_lfw_pairs",
    "fetch_lfw_people",
    "fetch_olivetti_faces",
    "fetch_species_distributions",
    "fetch_california_housing",
    "fetch_covtype",
    "fetch_rcv1",
    "fetch_kddcup99",
    "fetch_openml",
    "get_data_home",
    "load_diabetes",
    "load_digits",
    "load_files",
    "load_iris",
    "load_breast_cancer",
    "load_linnerud",
    "load_sample_image",
    "load_sample_images",
    "load_svmlight_file",
    "load_svmlight_files",
    "load_wine",
    "make_biclusters",
    "make_blobs",
    "make_circles",
    "make_classification",
    "make_checkerboard",
    "make_friedman1",
    "make_friedman2",
    "make_friedman3",
    "make_gaussian_quantiles",
    "make_hastie_10_2",
    "make_low_rank_matrix",
    "make_moons",
    "make_multilabel_classification",
    "make_regression",
    "make_s_curve",
    "make_sparse_coded_signal",
    "make_sparse_spd_matrix",
    "make_sparse_uncorrelated",
    "make_spd_matrix",
    "make_swiss_roll",
]


def __getattr__(name):
    if name == "load_boston":
        msg = textwrap.dedent("""
            `load_boston` has been removed from scikit-learn since version 1.2.

            The Boston housing prices dataset has an ethical problem: as
            investigated in [1], the authors of this dataset engineered a
            non-invertible variable "B" assuming that racial self-segregation had a
            positive impact on house prices [2]. Furthermore the goal of the
            research that led to the creation of this dataset was to study the
            impact of air quality but it did not give adequate demonstration of the
            validity of this assumption.

            The scikit-learn maintainers therefore strongly discourage the use of
            this dataset unless the purpose of the code is to study and educate
            about ethical issues in data science and machine learning.

            In this special case, you can fetch the dataset from the original
            source::

                import pandas as pd
                import numpy as np

                data_url = "http://lib.stat.cmu.edu/datasets/boston"
                raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
                data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
                target = raw_df.values[1::2, 2]

            Alternative datasets include the California housing dataset and the
            Ames housing dataset. You can load the datasets as follows::

                from sklearn.datasets import fetch_california_housing
                housing = fetch_california_housing()

            for the California housing dataset and::

                from sklearn.datasets import fetch_openml
                housing = fetch_openml(name="house_prices", as_frame=True)

            for the Ames housing dataset.

            [1] M Carlisle.
            "Racist data destruction?"
            <https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8>

            [2] Harrison Jr, David, and Daniel L. Rubinfeld.
            "Hedonic housing prices and the demand for clean air."
            Journal of environmental economics and management 5.1 (1978): 81-102.
            <https://www.researchgate.net/publication/4974606_Hedonic_housing_prices_and_the_demand_for_clean_air>
            """)
        raise ImportError(msg)
    try:
        return globals()[name]
    except KeyError:
        # This is turned into the appropriate ImportError
        raise AttributeError
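The module-level `__getattr__` above relies on PEP 562: Python calls it only when normal attribute lookup on the module fails, which is how importing the removed `load_boston` is turned into the explanatory `ImportError` while every other name keeps working. A minimal sketch of the same pattern, independent of scikit-learn (the module and function names here are hypothetical):

    # mymodule.py
    def __getattr__(name):
        # Called only when `name` is not found by normal module lookup.
        if name == "removed_function":
            raise ImportError(
                "`removed_function` was removed from mymodule; see the changelog."
            )
        # Unknown names fall through to the usual AttributeError, which
        # `from mymodule import x` reports as an ImportError.
        raise AttributeError(name)

With this in place, both `import mymodule; mymodule.removed_function` and `from mymodule import removed_function` fail with the custom message, exactly as the scikit-learn module does for `load_boston`.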
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.75 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_arff_parser.cpython-310.pyc
ADDED
Binary file (14.2 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (42 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_california_housing.cpython-310.pyc
ADDED
Binary file (5.94 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_covtype.cpython-310.pyc
ADDED
Binary file (6.58 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_kddcup99.cpython-310.pyc
ADDED
Binary file (10.9 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_openml.cpython-310.pyc
ADDED
Binary file (33 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_rcv1.cpython-310.pyc
ADDED
Binary file (8.09 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_species_distributions.cpython-310.pyc
ADDED
Binary file (8.55 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_svmlight_format_io.cpython-310.pyc
ADDED
Binary file (17.5 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_twenty_newsgroups.cpython-310.pyc
ADDED
Binary file (16.4 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py
ADDED
@@ -0,0 +1,542 @@
"""Implementation of ARFF parsers: via LIAC-ARFF and pandas."""
import itertools
import re
from collections import OrderedDict
from collections.abc import Generator
from typing import List

import numpy as np
import scipy as sp

from ..externals import _arff
from ..externals._arff import ArffSparseDataType
from ..utils import (
    _chunk_generator,
    check_pandas_support,
    get_chunk_n_rows,
)
from ..utils.fixes import pd_fillna


def _split_sparse_columns(
    arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
    """Obtains several columns from sparse ARFF representation. Additionally,
    the column indices are re-labelled, given the columns that are not
    included (e.g., when including [1, 2, 3], the columns will be relabelled
    to [0, 1, 2]).

    Parameters
    ----------
    arff_data : tuple
        A tuple of three lists of equal size; the first list indicating the
        value, the second the x coordinate and the third the y coordinate.

    include_columns : list
        A list of columns to include.

    Returns
    -------
    arff_data_new : tuple
        Subset of arff data with only the include columns indicated by the
        include_columns argument.
    """
    arff_data_new: ArffSparseDataType = (list(), list(), list())
    reindexed_columns = {
        column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)
    }
    for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
        if col_idx in include_columns:
            arff_data_new[0].append(val)
            arff_data_new[1].append(row_idx)
            arff_data_new[2].append(reindexed_columns[col_idx])
    return arff_data_new


def _sparse_data_to_array(
    arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
    # turns the sparse data back into an array (can't use the toarray()
    # function, as this only works on numeric data)
    num_obs = max(arff_data[1]) + 1
    y_shape = (num_obs, len(include_columns))
    reindexed_columns = {
        column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)
    }
    # TODO: improve for efficiency
    y = np.empty(y_shape, dtype=np.float64)
    for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
        if col_idx in include_columns:
            y[row_idx, reindexed_columns[col_idx]] = val
    return y


def _post_process_frame(frame, feature_names, target_names):
    """Post process a dataframe to select the desired columns in `X` and `y`.

    Parameters
    ----------
    frame : dataframe
        The dataframe to split into `X` and `y`.

    feature_names : list of str
        The list of feature names to populate `X`.

    target_names : list of str
        The list of target names to populate `y`.

    Returns
    -------
    X : dataframe
        The dataframe containing the features.

    y : {series, dataframe} or None
        The series or dataframe containing the target.
    """
    X = frame[feature_names]
    if len(target_names) >= 2:
        y = frame[target_names]
    elif len(target_names) == 1:
        y = frame[target_names[0]]
    else:
        y = None
    return X, y


def _liac_arff_parser(
    gzip_file,
    output_arrays_type,
    openml_columns_info,
    feature_names_to_select,
    target_names_to_select,
    shape=None,
):
    """ARFF parser using the LIAC-ARFF library coded purely in Python.

    This parser is quite slow but consumes a generator. Currently it is needed
    to parse sparse datasets. For dense datasets, it is recommended to instead
    use the pandas-based parser, although it does not always handle the
    dtypes exactly the same.

    Parameters
    ----------
    gzip_file : GzipFile instance
        The compressed file to be read.

    output_arrays_type : {"numpy", "sparse", "pandas"}
        The type of the arrays that will be returned. The possibilities are:

        - `"numpy"`: both `X` and `y` will be NumPy arrays;
        - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
        - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
          pandas Series or DataFrame.

    columns_info : dict
        The information provided by OpenML regarding the columns of the ARFF
        file.

    feature_names_to_select : list of str
        A list of the feature names to be selected.

    target_names_to_select : list of str
        A list of the target names to be selected.

    Returns
    -------
    X : {ndarray, sparse matrix, dataframe}
        The data matrix.

    y : {ndarray, dataframe, series}
        The target.

    frame : dataframe or None
        A dataframe containing both `X` and `y`. `None` if
        `output_array_type != "pandas"`.

    categories : list of str or None
        The names of the features that are categorical. `None` if
        `output_array_type == "pandas"`.
    """

    def _io_to_generator(gzip_file):
        for line in gzip_file:
            yield line.decode("utf-8")

    stream = _io_to_generator(gzip_file)

    # find which ARFF type (dense or sparse) we will have to deal with
    return_type = _arff.COO if output_arrays_type == "sparse" else _arff.DENSE_GEN
    # we should not let LIAC-ARFF encode the nominal attributes with NumPy
    # arrays to have only numerical values.
    encode_nominal = not (output_arrays_type == "pandas")
    arff_container = _arff.load(
        stream, return_type=return_type, encode_nominal=encode_nominal
    )
    columns_to_select = feature_names_to_select + target_names_to_select

    categories = {
        name: cat
        for name, cat in arff_container["attributes"]
        if isinstance(cat, list) and name in columns_to_select
    }
    if output_arrays_type == "pandas":
        pd = check_pandas_support("fetch_openml with as_frame=True")

        columns_info = OrderedDict(arff_container["attributes"])
        columns_names = list(columns_info.keys())

        # calculate chunksize
        first_row = next(arff_container["data"])
        first_df = pd.DataFrame([first_row], columns=columns_names, copy=False)

        row_bytes = first_df.memory_usage(deep=True).sum()
        chunksize = get_chunk_n_rows(row_bytes)

        # read arff data with chunks
        columns_to_keep = [col for col in columns_names if col in columns_to_select]
        dfs = [first_df[columns_to_keep]]
        for data in _chunk_generator(arff_container["data"], chunksize):
            dfs.append(
                pd.DataFrame(data, columns=columns_names, copy=False)[columns_to_keep]
            )
        # dfs[0] contains only one row, which may not have enough data to infer
        # the column's dtype. Here we use `dfs[1]` to configure the dtype in dfs[0]
        if len(dfs) >= 2:
            dfs[0] = dfs[0].astype(dfs[1].dtypes)

        # liac-arff parser does not depend on NumPy and uses None to represent
        # missing values. To be consistent with the pandas parser, we replace
        # None with np.nan.
        frame = pd.concat(dfs, ignore_index=True)
        frame = pd_fillna(pd, frame)
        del dfs, first_df

        # cast the columns frame
        dtypes = {}
        for name in frame.columns:
            column_dtype = openml_columns_info[name]["data_type"]
            if column_dtype.lower() == "integer":
                # Use a pandas extension array instead of np.int64 to be able
                # to support missing values.
                dtypes[name] = "Int64"
            elif column_dtype.lower() == "nominal":
                dtypes[name] = "category"
            else:
                dtypes[name] = frame.dtypes[name]
        frame = frame.astype(dtypes)

        X, y = _post_process_frame(
            frame, feature_names_to_select, target_names_to_select
        )
    else:
        arff_data = arff_container["data"]

        feature_indices_to_select = [
            int(openml_columns_info[col_name]["index"])
            for col_name in feature_names_to_select
        ]
        target_indices_to_select = [
            int(openml_columns_info[col_name]["index"])
            for col_name in target_names_to_select
        ]

        if isinstance(arff_data, Generator):
            if shape is None:
                raise ValueError(
                    "shape must be provided when arr['data'] is a Generator"
                )
            if shape[0] == -1:
                count = -1
            else:
                count = shape[0] * shape[1]
            data = np.fromiter(
                itertools.chain.from_iterable(arff_data),
                dtype="float64",
                count=count,
            )
            data = data.reshape(*shape)
            X = data[:, feature_indices_to_select]
            y = data[:, target_indices_to_select]
        elif isinstance(arff_data, tuple):
            arff_data_X = _split_sparse_columns(arff_data, feature_indices_to_select)
            num_obs = max(arff_data[1]) + 1
            X_shape = (num_obs, len(feature_indices_to_select))
            X = sp.sparse.coo_matrix(
                (arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
                shape=X_shape,
                dtype=np.float64,
            )
            X = X.tocsr()
            y = _sparse_data_to_array(arff_data, target_indices_to_select)
        else:
            # This should never happen
            raise ValueError(
                f"Unexpected type for data obtained from arff: {type(arff_data)}"
            )

        is_classification = {
            col_name in categories for col_name in target_names_to_select
        }
        if not is_classification:
            # No target
            pass
        elif all(is_classification):
            y = np.hstack(
                [
                    np.take(
                        np.asarray(categories.pop(col_name), dtype="O"),
                        y[:, i : i + 1].astype(int, copy=False),
                    )
                    for i, col_name in enumerate(target_names_to_select)
                ]
            )
        elif any(is_classification):
            raise ValueError(
                "Mix of nominal and non-nominal targets is not currently supported"
            )

        # reshape y back to 1-D array, if there is only 1 target column;
        # back to None if there are no target columns
        if y.shape[1] == 1:
            y = y.reshape((-1,))
        elif y.shape[1] == 0:
            y = None

    if output_arrays_type == "pandas":
        return X, y, frame, None
    return X, y, None, categories


def _pandas_arff_parser(
    gzip_file,
    output_arrays_type,
    openml_columns_info,
    feature_names_to_select,
    target_names_to_select,
    read_csv_kwargs=None,
):
    """ARFF parser using `pandas.read_csv`.

    This parser uses the metadata fetched directly from OpenML and skips the
    metadata headers of the ARFF file itself. The data is loaded as a CSV file.

    Parameters
    ----------
    gzip_file : GzipFile instance
        The GZip compressed file with the ARFF formatted payload.

    output_arrays_type : {"numpy", "sparse", "pandas"}
        The type of the arrays that will be returned. The possibilities are:

        - `"numpy"`: both `X` and `y` will be NumPy arrays;
        - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
        - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
          pandas Series or DataFrame.

    openml_columns_info : dict
        The information provided by OpenML regarding the columns of the ARFF
        file.

    feature_names_to_select : list of str
        A list of the feature names to be selected to build `X`.

    target_names_to_select : list of str
        A list of the target names to be selected to build `y`.

    read_csv_kwargs : dict, default=None
        Keyword arguments to pass to `pandas.read_csv`. It allows overwriting
        the default options.

    Returns
    -------
    X : {ndarray, sparse matrix, dataframe}
        The data matrix.

    y : {ndarray, dataframe, series}
        The target.

    frame : dataframe or None
        A dataframe containing both `X` and `y`. `None` if
        `output_array_type != "pandas"`.

    categories : list of str or None
        The names of the features that are categorical. `None` if
        `output_array_type == "pandas"`.
    """
    import pandas as pd

    # read the file until the data section to skip the ARFF metadata headers
    for line in gzip_file:
        if line.decode("utf-8").lower().startswith("@data"):
            break

    dtypes = {}
    for name in openml_columns_info:
        column_dtype = openml_columns_info[name]["data_type"]
        if column_dtype.lower() == "integer":
            # Use Int64 to infer missing values from data
            # XXX: this line is not covered by our tests. Is this really needed?
            dtypes[name] = "Int64"
        elif column_dtype.lower() == "nominal":
            dtypes[name] = "category"
    # since we will not pass `names` when reading the ARFF file, we need to translate
    # `dtypes` from column names to column indices to pass to `pandas.read_csv`
    dtypes_positional = {
        col_idx: dtypes[name]
        for col_idx, name in enumerate(openml_columns_info)
        if name in dtypes
    }

    default_read_csv_kwargs = {
        "header": None,
        "index_col": False,  # always force pandas to not use the first column as index
        "na_values": ["?"],  # missing values are represented by `?`
        "keep_default_na": False,  # only `?` is a missing value given the ARFF specs
        "comment": "%",  # skip lines starting with `%` since they are comments
        "quotechar": '"',  # character used to quote strings
        "skipinitialspace": True,  # skip spaces after delimiter to follow ARFF specs
        "escapechar": "\\",
        "dtype": dtypes_positional,
    }
    read_csv_kwargs = {**default_read_csv_kwargs, **(read_csv_kwargs or {})}
    frame = pd.read_csv(gzip_file, **read_csv_kwargs)
    try:
        # Setting the columns while reading the file will select the N first columns
        # and not raise a ParserError. Instead, we set the columns after reading the
        # file and raise a ParserError if the number of columns does not match the
        # number of columns in the metadata given by OpenML.
        frame.columns = [name for name in openml_columns_info]
    except ValueError as exc:
        raise pd.errors.ParserError(
            "The number of columns provided by OpenML does not match the number of "
            "columns inferred by pandas when reading the file."
        ) from exc

    columns_to_select = feature_names_to_select + target_names_to_select
    columns_to_keep = [col for col in frame.columns if col in columns_to_select]
    frame = frame[columns_to_keep]

    # `pd.read_csv` automatically handles double quotes for quoting non-numeric
    # CSV cell values. Contrary to LIAC-ARFF, `pd.read_csv` cannot be configured to
    # consider both single quotes and double quotes as valid quoting chars at
    # the same time since this case does not occur in regular (non-ARFF) CSV files.
    # To mimic the behavior of the LIAC-ARFF parser, we manually strip single quotes
    # on categories as a post-processing step if needed.
    #
    # Note however that we intentionally do not attempt to do this kind of manual
    # post-processing of (non-categorical) string-typed columns because we cannot
    # resolve the ambiguity of the case of CSV cell with nesting quoting such as
    # `"'some string value'"` with pandas.
    single_quote_pattern = re.compile(r"^'(?P<contents>.*)'$")

    def strip_single_quotes(input_string):
        match = re.search(single_quote_pattern, input_string)
        if match is None:
            return input_string

        return match.group("contents")

    categorical_columns = [
        name
        for name, dtype in frame.dtypes.items()
        if isinstance(dtype, pd.CategoricalDtype)
    ]
    for col in categorical_columns:
        frame[col] = frame[col].cat.rename_categories(strip_single_quotes)

    X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select)

    if output_arrays_type == "pandas":
        return X, y, frame, None
    else:
        X, y = X.to_numpy(), y.to_numpy()

    categories = {
        name: dtype.categories.tolist()
        for name, dtype in frame.dtypes.items()
        if isinstance(dtype, pd.CategoricalDtype)
    }
    return X, y, None, categories


def load_arff_from_gzip_file(
    gzip_file,
    parser,
    output_type,
    openml_columns_info,
    feature_names_to_select,
    target_names_to_select,
    shape=None,
    read_csv_kwargs=None,
):
    """Load a compressed ARFF file using a given parser.

    Parameters
    ----------
    gzip_file : GzipFile instance
        The compressed file to be read.

    parser : {"pandas", "liac-arff"}
        The parser used to parse the ARFF file. "pandas" is recommended
        but only supports loading dense datasets.

    output_type : {"numpy", "sparse", "pandas"}
        The type of the arrays that will be returned. The possibilities are:

        - `"numpy"`: both `X` and `y` will be NumPy arrays;
        - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
        - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
          pandas Series or DataFrame.

    openml_columns_info : dict
        The information provided by OpenML regarding the columns of the ARFF
        file.

    feature_names_to_select : list of str
        A list of the feature names to be selected.

    target_names_to_select : list of str
        A list of the target names to be selected.

    read_csv_kwargs : dict, default=None
        Keyword arguments to pass to `pandas.read_csv`. It allows overwriting
        the default options.

    Returns
    -------
    X : {ndarray, sparse matrix, dataframe}
        The data matrix.

    y : {ndarray, dataframe, series}
        The target.

    frame : dataframe or None
        A dataframe containing both `X` and `y`. `None` if
        `output_array_type != "pandas"`.

    categories : list of str or None
        The names of the features that are categorical. `None` if
        `output_array_type == "pandas"`.
    """
    if parser == "liac-arff":
        return _liac_arff_parser(
            gzip_file,
            output_type,
            openml_columns_info,
            feature_names_to_select,
            target_names_to_select,
            shape,
        )
    elif parser == "pandas":
        return _pandas_arff_parser(
            gzip_file,
            output_type,
            openml_columns_info,
            feature_names_to_select,
            target_names_to_select,
            read_csv_kwargs,
        )
    else:
        raise ValueError(
            f"Unknown parser: '{parser}'. Should be 'liac-arff' or 'pandas'."
        )
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_base.py
ADDED
@@ -0,0 +1,1441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Base IO code for all datasets
|
3 |
+
"""
|
4 |
+
|
5 |
+
# Copyright (c) 2007 David Cournapeau <[email protected]>
|
6 |
+
# 2010 Fabian Pedregosa <[email protected]>
|
7 |
+
# 2010 Olivier Grisel <[email protected]>
|
8 |
+
# License: BSD 3 clause
|
9 |
+
import csv
|
10 |
+
import gzip
|
11 |
+
import hashlib
|
12 |
+
import os
|
13 |
+
import shutil
|
14 |
+
from collections import namedtuple
|
15 |
+
from importlib import resources
|
16 |
+
from numbers import Integral
|
17 |
+
from os import environ, listdir, makedirs
|
18 |
+
from os.path import expanduser, isdir, join, splitext
|
19 |
+
from pathlib import Path
|
20 |
+
from urllib.request import urlretrieve
|
21 |
+
|
22 |
+
import numpy as np
|
23 |
+
|
24 |
+
from ..preprocessing import scale
|
25 |
+
from ..utils import Bunch, check_pandas_support, check_random_state
|
26 |
+
from ..utils._param_validation import Interval, StrOptions, validate_params
|
27 |
+
|
28 |
+
DATA_MODULE = "sklearn.datasets.data"
|
29 |
+
DESCR_MODULE = "sklearn.datasets.descr"
|
30 |
+
IMAGES_MODULE = "sklearn.datasets.images"
|
31 |
+
|
32 |
+
RemoteFileMetadata = namedtuple("RemoteFileMetadata", ["filename", "url", "checksum"])
|
33 |
+
|
34 |
+
|
35 |
+
@validate_params(
|
36 |
+
{
|
37 |
+
"data_home": [str, os.PathLike, None],
|
38 |
+
},
|
39 |
+
prefer_skip_nested_validation=True,
|
40 |
+
)
|
41 |
+
def get_data_home(data_home=None) -> str:
|
42 |
+
"""Return the path of the scikit-learn data directory.
|
43 |
+
|
44 |
+
This folder is used by some large dataset loaders to avoid downloading the
|
45 |
+
data several times.
|
46 |
+
|
47 |
+
By default the data directory is set to a folder named 'scikit_learn_data' in the
|
48 |
+
user home folder.
|
49 |
+
|
50 |
+
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
|
51 |
+
variable or programmatically by giving an explicit folder path. The '~'
|
52 |
+
symbol is expanded to the user home folder.
|
53 |
+
|
54 |
+
If the folder does not already exist, it is automatically created.
|
55 |
+
|
56 |
+
Parameters
|
57 |
+
----------
|
58 |
+
data_home : str or path-like, default=None
|
59 |
+
The path to scikit-learn data directory. If `None`, the default path
|
60 |
+
is `~/scikit_learn_data`.
|
61 |
+
|
62 |
+
Returns
|
63 |
+
-------
|
64 |
+
data_home: str
|
65 |
+
The path to scikit-learn data directory.
|
66 |
+
"""
|
67 |
+
if data_home is None:
|
68 |
+
data_home = environ.get("SCIKIT_LEARN_DATA", join("~", "scikit_learn_data"))
|
69 |
+
data_home = expanduser(data_home)
|
70 |
+
makedirs(data_home, exist_ok=True)
|
71 |
+
return data_home
|
72 |
+
|
73 |
+
|
74 |
+
@validate_params(
|
75 |
+
{
|
76 |
+
"data_home": [str, os.PathLike, None],
|
77 |
+
},
|
78 |
+
prefer_skip_nested_validation=True,
|
79 |
+
)
|
80 |
+
def clear_data_home(data_home=None):
|
81 |
+
"""Delete all the content of the data home cache.
|
82 |
+
|
83 |
+
Parameters
|
84 |
+
----------
|
85 |
+
data_home : str or path-like, default=None
|
86 |
+
The path to scikit-learn data directory. If `None`, the default path
|
87 |
+
is `~/scikit_learn_data`.
|
88 |
+
|
89 |
+
Examples
|
90 |
+
--------
|
91 |
+
>>> from sklearn.datasets import clear_data_home
|
92 |
+
>>> clear_data_home() # doctest: +SKIP
|
93 |
+
"""
|
94 |
+
data_home = get_data_home(data_home)
|
95 |
+
shutil.rmtree(data_home)
|
96 |
+
|
97 |
+
|
98 |
+
def _convert_data_dataframe(
|
99 |
+
caller_name, data, target, feature_names, target_names, sparse_data=False
|
100 |
+
):
|
101 |
+
pd = check_pandas_support("{} with as_frame=True".format(caller_name))
|
102 |
+
if not sparse_data:
|
103 |
+
data_df = pd.DataFrame(data, columns=feature_names, copy=False)
|
104 |
+
else:
|
105 |
+
data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names)
|
106 |
+
|
107 |
+
target_df = pd.DataFrame(target, columns=target_names)
|
108 |
+
combined_df = pd.concat([data_df, target_df], axis=1)
|
109 |
+
X = combined_df[feature_names]
|
110 |
+
y = combined_df[target_names]
|
111 |
+
if y.shape[1] == 1:
|
112 |
+
y = y.iloc[:, 0]
|
113 |
+
return combined_df, X, y
|
114 |
+
|
115 |
+
|
116 |
+
@validate_params(
|
117 |
+
{
|
118 |
+
"container_path": [str, os.PathLike],
|
119 |
+
"description": [str, None],
|
120 |
+
"categories": [list, None],
|
121 |
+
"load_content": ["boolean"],
|
122 |
+
"shuffle": ["boolean"],
|
123 |
+
"encoding": [str, None],
|
124 |
+
"decode_error": [StrOptions({"strict", "ignore", "replace"})],
|
125 |
+
"random_state": ["random_state"],
|
126 |
+
"allowed_extensions": [list, None],
|
127 |
+
},
|
128 |
+
prefer_skip_nested_validation=True,
|
129 |
+
)
|
130 |
+
def load_files(
|
131 |
+
container_path,
|
132 |
+
*,
|
133 |
+
description=None,
|
134 |
+
categories=None,
|
135 |
+
load_content=True,
|
136 |
+
shuffle=True,
|
137 |
+
encoding=None,
|
138 |
+
decode_error="strict",
|
139 |
+
random_state=0,
|
140 |
+
allowed_extensions=None,
|
141 |
+
):
|
142 |
+
"""Load text files with categories as subfolder names.
|
143 |
+
|
144 |
+
Individual samples are assumed to be files stored a two levels folder
|
145 |
+
structure such as the following:
|
146 |
+
|
147 |
+
container_folder/
|
148 |
+
category_1_folder/
|
149 |
+
file_1.txt
|
150 |
+
file_2.txt
|
151 |
+
...
|
152 |
+
file_42.txt
|
153 |
+
category_2_folder/
|
154 |
+
file_43.txt
|
155 |
+
file_44.txt
|
156 |
+
...
|
157 |
+
|
158 |
+
The folder names are used as supervised signal label names. The individual
|
159 |
+
file names are not important.
|
160 |
+
|
161 |
+
This function does not try to extract features into a numpy array or scipy
|
162 |
+
sparse matrix. In addition, if load_content is false it does not try to
|
163 |
+
load the files in memory.
|
164 |
+
|
165 |
+
To use text files in a scikit-learn classification or clustering algorithm,
|
166 |
+
you will need to use the :mod:`~sklearn.feature_extraction.text` module to
|
167 |
+
build a feature extraction transformer that suits your problem.
|
168 |
+
|
169 |
+
If you set load_content=True, you should also specify the encoding of the
|
170 |
+
text using the 'encoding' parameter. For many modern text files, 'utf-8'
|
171 |
+
will be the correct encoding. If you leave encoding equal to None, then the
|
172 |
+
content will be made of bytes instead of Unicode, and you will not be able
|
173 |
+
to use most functions in :mod:`~sklearn.feature_extraction.text`.
|
174 |
+
|
175 |
+
Similar feature extractors should be built for other kind of unstructured
|
176 |
+
data input such as images, audio, video, ...
|
177 |
+
|
178 |
+
If you want files with a specific file extension (e.g. `.txt`) then you
|
179 |
+
can pass a list of those file extensions to `allowed_extensions`.
|
180 |
+
|
181 |
+
Read more in the :ref:`User Guide <datasets>`.
|
182 |
+
|
183 |
+
Parameters
|
184 |
+
----------
|
185 |
+
container_path : str
|
186 |
+
Path to the main folder holding one subfolder per category.
|
187 |
+
|
188 |
+
description : str, default=None
|
189 |
+
A paragraph describing the characteristic of the dataset: its source,
|
190 |
+
reference, etc.
|
191 |
+
|
192 |
+
categories : list of str, default=None
|
193 |
+
If None (default), load all the categories. If not None, list of
|
194 |
+
category names to load (other categories ignored).
|
195 |
+
|
196 |
+
load_content : bool, default=True
|
197 |
+
Whether to load or not the content of the different files. If true a
|
198 |
+
'data' attribute containing the text information is present in the data
|
199 |
+
structure returned. If not, a filenames attribute gives the path to the
|
200 |
+
files.
|
201 |
+
|
202 |
+
shuffle : bool, default=True
|
203 |
+
Whether or not to shuffle the data: might be important for models that
|
204 |
+
make the assumption that the samples are independent and identically
|
205 |
+
distributed (i.i.d.), such as stochastic gradient descent.
|
206 |
+
|
207 |
+
encoding : str, default=None
|
208 |
+
If None, do not try to decode the content of the files (e.g. for images
|
209 |
+
or other non-text content). If not None, encoding to use to decode text
|
210 |
+
files to Unicode if load_content is True.
|
211 |
+
|
212 |
+
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
|
213 |
+
Instruction on what to do if a byte sequence is given to analyze that
|
214 |
+
contains characters not of the given `encoding`. Passed as keyword
|
215 |
+
argument 'errors' to bytes.decode.
|
216 |
+
|
217 |
+
random_state : int, RandomState instance or None, default=0
|
218 |
+
Determines random number generation for dataset shuffling. Pass an int
|
219 |
+
for reproducible output across multiple function calls.
|
220 |
+
See :term:`Glossary <random_state>`.
|
221 |
+
|
222 |
+
allowed_extensions : list of str, default=None
|
223 |
+
List of desired file extensions to filter the files to be loaded.
|
224 |
+
|
225 |
+
Returns
|
226 |
+
-------
|
227 |
+
data : :class:`~sklearn.utils.Bunch`
|
228 |
+
Dictionary-like object, with the following attributes.
|
229 |
+
|
230 |
+
data : list of str
|
231 |
+
Only present when `load_content=True`.
|
232 |
+
The raw text data to learn.
|
233 |
+
target : ndarray
|
234 |
+
The target labels (integer index).
|
235 |
+
target_names : list
|
236 |
+
The names of target classes.
|
237 |
+
DESCR : str
|
238 |
+
The full description of the dataset.
|
239 |
+
filenames: ndarray
|
240 |
+
The filenames holding the dataset.
|
241 |
+
|
242 |
+
Examples
|
243 |
+
--------
|
244 |
+
>>> from sklearn.datasets import load_files
|
245 |
+
>>> container_path = "./"
|
246 |
+
>>> load_files(container_path) # doctest: +SKIP
|
247 |
+
"""
|
248 |
+
|
249 |
+
target = []
|
250 |
+
target_names = []
|
251 |
+
filenames = []
|
252 |
+
|
253 |
+
folders = [
|
254 |
+
f for f in sorted(listdir(container_path)) if isdir(join(container_path, f))
|
255 |
+
]
|
256 |
+
|
257 |
+
if categories is not None:
|
258 |
+
folders = [f for f in folders if f in categories]
|
259 |
+
|
260 |
+
if allowed_extensions is not None:
|
261 |
+
allowed_extensions = frozenset(allowed_extensions)
|
262 |
+
|
263 |
+
for label, folder in enumerate(folders):
|
264 |
+
target_names.append(folder)
|
265 |
+
folder_path = join(container_path, folder)
|
266 |
+
files = sorted(listdir(folder_path))
|
267 |
+
if allowed_extensions is not None:
|
268 |
+
documents = [
|
269 |
+
join(folder_path, file)
|
270 |
+
for file in files
|
271 |
+
if os.path.splitext(file)[1] in allowed_extensions
|
272 |
+
]
|
273 |
+
else:
|
274 |
+
documents = [join(folder_path, file) for file in files]
|
275 |
+
target.extend(len(documents) * [label])
|
276 |
+
filenames.extend(documents)
|
277 |
+
|
278 |
+
# convert to array for fancy indexing
|
279 |
+
filenames = np.array(filenames)
|
280 |
+
target = np.array(target)
|
281 |
+
|
282 |
+
if shuffle:
|
283 |
+
random_state = check_random_state(random_state)
|
284 |
+
indices = np.arange(filenames.shape[0])
|
285 |
+
random_state.shuffle(indices)
|
286 |
+
filenames = filenames[indices]
|
287 |
+
target = target[indices]
|
288 |
+
|
289 |
+
if load_content:
|
290 |
+
data = []
|
291 |
+
for filename in filenames:
|
292 |
+
data.append(Path(filename).read_bytes())
|
293 |
+
if encoding is not None:
|
294 |
+
data = [d.decode(encoding, decode_error) for d in data]
|
295 |
+
return Bunch(
|
296 |
+
data=data,
|
297 |
+
filenames=filenames,
|
298 |
+
target_names=target_names,
|
299 |
+
target=target,
|
300 |
+
DESCR=description,
|
301 |
+
)
|
302 |
+
|
303 |
+
return Bunch(
|
304 |
+
filenames=filenames, target_names=target_names, target=target, DESCR=description
|
305 |
+
)
|
306 |
+
|
307 |
+
|
308 |
+
def load_csv_data(
|
309 |
+
data_file_name,
|
310 |
+
*,
|
311 |
+
data_module=DATA_MODULE,
|
312 |
+
descr_file_name=None,
|
313 |
+
descr_module=DESCR_MODULE,
|
314 |
+
encoding="utf-8",
|
315 |
+
):
|
316 |
+
"""Loads `data_file_name` from `data_module with `importlib.resources`.
|
317 |
+
|
318 |
+
Parameters
|
319 |
+
----------
|
320 |
+
data_file_name : str
|
321 |
+
Name of csv file to be loaded from `data_module/data_file_name`.
|
322 |
+
For example `'wine_data.csv'`.
|
323 |
+
|
324 |
+
data_module : str or module, default='sklearn.datasets.data'
|
325 |
+
Module where data lives. The default is `'sklearn.datasets.data'`.
|
326 |
+
|
327 |
+
descr_file_name : str, default=None
|
328 |
+
Name of rst file to be loaded from `descr_module/descr_file_name`.
|
329 |
+
For example `'wine_data.rst'`. See also :func:`load_descr`.
|
330 |
+
If not None, also returns the corresponding description of
|
331 |
+
the dataset.
|
332 |
+
|
333 |
+
descr_module : str or module, default='sklearn.datasets.descr'
|
334 |
+
Module where `descr_file_name` lives. See also :func:`load_descr`.
|
335 |
+
The default is `'sklearn.datasets.descr'`.
|
336 |
+
|
337 |
+
Returns
|
338 |
+
-------
|
339 |
+
data : ndarray of shape (n_samples, n_features)
|
340 |
+
A 2D array with each row representing one sample and each column
|
341 |
+
representing the features of a given sample.
|
342 |
+
|
343 |
+
target : ndarry of shape (n_samples,)
|
344 |
+
A 1D array holding target variables for all the samples in `data`.
|
345 |
+
For example target[0] is the target variable for data[0].
|
346 |
+
|
347 |
+
target_names : ndarry of shape (n_samples,)
|
348 |
+
A 1D array containing the names of the classifications. For example
|
349 |
+
target_names[0] is the name of the target[0] class.
|
350 |
+
|
351 |
+
descr : str, optional
|
352 |
+
Description of the dataset (the content of `descr_file_name`).
|
353 |
+
Only returned if `descr_file_name` is not None.
|
354 |
+
|
355 |
+
encoding : str, optional
|
356 |
+
Text encoding of the CSV file.
|
357 |
+
|
358 |
+
.. versionadded:: 1.4
|
359 |
+
"""
|
360 |
+
data_path = resources.files(data_module) / data_file_name
|
361 |
+
with data_path.open("r", encoding="utf-8") as csv_file:
|
362 |
+
data_file = csv.reader(csv_file)
|
363 |
+
temp = next(data_file)
|
364 |
+
n_samples = int(temp[0])
|
365 |
+
n_features = int(temp[1])
|
366 |
+
target_names = np.array(temp[2:])
|
367 |
+
data = np.empty((n_samples, n_features))
|
368 |
+
target = np.empty((n_samples,), dtype=int)
|
369 |
+
|
370 |
+
for i, ir in enumerate(data_file):
|
371 |
+
data[i] = np.asarray(ir[:-1], dtype=np.float64)
|
372 |
+
target[i] = np.asarray(ir[-1], dtype=int)
|
373 |
+
|
374 |
+
if descr_file_name is None:
|
375 |
+
return data, target, target_names
|
376 |
+
else:
|
377 |
+
assert descr_module is not None
|
378 |
+
descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
|
379 |
+
return data, target, target_names, descr


def load_gzip_compressed_csv_data(
    data_file_name,
    *,
    data_module=DATA_MODULE,
    descr_file_name=None,
    descr_module=DESCR_MODULE,
    encoding="utf-8",
    **kwargs,
):
    """Loads gzip-compressed `data_file_name` from `data_module` with
    `importlib.resources`.

    1) Open the resource file with `importlib.resources`
    2) Decompress the file obj with `gzip.open`
    3) Load the decompressed data with `np.loadtxt`

    Parameters
    ----------
    data_file_name : str
        Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from
        `data_module/data_file_name`. For example `'diabetes_data.csv.gz'`.

    data_module : str or module, default='sklearn.datasets.data'
        Module where data lives. The default is `'sklearn.datasets.data'`.

    descr_file_name : str, default=None
        Name of rst file to be loaded from `descr_module/descr_file_name`.
        For example `'wine_data.rst'`. See also :func:`load_descr`.
        If not None, also returns the corresponding description of
        the dataset.

    descr_module : str or module, default='sklearn.datasets.descr'
        Module where `descr_file_name` lives. See also :func:`load_descr`.
        The default is `'sklearn.datasets.descr'`.

    encoding : str, default="utf-8"
        Name of the encoding that the gzip-decompressed file will be
        decoded with. The default is 'utf-8'.

    **kwargs : dict, optional
        Keyword arguments to be passed to `np.loadtxt`;
        e.g. delimiter=','.

    Returns
    -------
    data : ndarray of shape (n_samples, n_features)
        A 2D array with each row representing one sample and each column
        representing the features and/or target of a given sample.

    descr : str, optional
        Description of the dataset (the content of `descr_file_name`).
        Only returned if `descr_file_name` is not None.
    """
    data_path = resources.files(data_module) / data_file_name
    with data_path.open("rb") as compressed_file:
        compressed_file = gzip.open(compressed_file, mode="rt", encoding=encoding)
        data = np.loadtxt(compressed_file, **kwargs)

    if descr_file_name is None:
        return data
    else:
        assert descr_module is not None
        descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
        return data, descr
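

# NOTE (editorial): a usage sketch, not upstream code. Extra keyword
# arguments are forwarded to `np.loadtxt`, e.g. `delimiter=","` for
# comma-separated files such as `digits.csv.gz`, which is bundled with
# scikit-learn (64 pixel columns plus a trailing label column).
def _example_load_gzip_csv_sketch():
    digits = load_gzip_compressed_csv_data("digits.csv.gz", delimiter=",")
    assert digits.shape == (1797, 65)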


def load_descr(descr_file_name, *, descr_module=DESCR_MODULE, encoding="utf-8"):
    """Load `descr_file_name` from `descr_module` with `importlib.resources`.

    Parameters
    ----------
    descr_file_name : str
        Name of rst file to be loaded from `descr_module/descr_file_name`.
        For example `'wine_data.rst'`.

    descr_module : str or module, default='sklearn.datasets.descr'
        Module where `descr_file_name` lives. The default is
        `'sklearn.datasets.descr'`.

    encoding : str, default="utf-8"
        Name of the encoding that `descr_file_name` will be decoded with.
        The default is 'utf-8'.

        .. versionadded:: 1.4

    Returns
    -------
    fdescr : str
        Content of `descr_file_name`.
    """
    path = resources.files(descr_module) / descr_file_name
    return path.read_text(encoding=encoding)
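

# NOTE (editorial): a usage sketch, not upstream code. `iris.rst` is one of
# the description files bundled in `sklearn.datasets.descr`.
def _example_load_descr_sketch():
    descr = load_descr("iris.rst")
    assert isinstance(descr, str) and "Iris" in descr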


@validate_params(
    {
        "return_X_y": ["boolean"],
        "as_frame": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def load_wine(*, return_X_y=False, as_frame=False):
    """Load and return the wine dataset (classification).

    .. versionadded:: 0.18

    The wine dataset is a classic and very easy multi-class classification
    dataset.

    =================   ==============
    Classes                          3
    Samples per class       [59,71,48]
    Samples total                  178
    Dimensionality                  13
    Features            real, positive
    =================   ==============

    The copy of the UCI ML Wine Data Set is downloaded and modified to fit
    the standard format from:
    https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data

    Read more in the :ref:`User Guide <wine_dataset>`.

    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` objects.

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.

        .. versionadded:: 0.23

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (178, 13)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target : {ndarray, Series} of shape (178,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names : list
            The names of the dataset columns.
        target_names : list
            The names of target classes.
        frame : DataFrame of shape (178, 14)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.

            .. versionadded:: 0.23
        DESCR : str
            The full description of the dataset.

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarrays by default. The first contains a 2D array of shape
        (178, 13) with each row representing one sample and each column representing
        the features. The second array of shape (178,) contains the target samples.

    Examples
    --------
    Let's say you are interested in the samples 10, 80, and 140, and want to
    know their class name.

    >>> from sklearn.datasets import load_wine
    >>> data = load_wine()
    >>> data.target[[10, 80, 140]]
    array([0, 1, 2])
    >>> list(data.target_names)
    ['class_0', 'class_1', 'class_2']
    """
    data, target, target_names, fdescr = load_csv_data(
        data_file_name="wine_data.csv", descr_file_name="wine_data.rst"
    )

    feature_names = [
        "alcohol",
        "malic_acid",
        "ash",
        "alcalinity_of_ash",
        "magnesium",
        "total_phenols",
        "flavanoids",
        "nonflavanoid_phenols",
        "proanthocyanins",
        "color_intensity",
        "hue",
        "od280/od315_of_diluted_wines",
        "proline",
    ]

    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "load_wine", data, target, feature_names, target_columns
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        DESCR=fdescr,
        feature_names=feature_names,
    )
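

# NOTE (editorial): a short sketch of the `as_frame` / `return_X_y` paths,
# not upstream code; `as_frame=True` assumes pandas is installed. The
# combined `frame` carries the 13 feature columns plus a "target" column.
def _example_load_wine_frames_sketch():
    bunch = load_wine(as_frame=True)
    assert bunch.frame.shape == (178, 14)
    X, y = load_wine(return_X_y=True)
    assert X.shape == (178, 13) and y.shape == (178,)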


@validate_params(
    {"return_X_y": ["boolean"], "as_frame": ["boolean"]},
    prefer_skip_nested_validation=True,
)
def load_iris(*, return_X_y=False, as_frame=False):
    """Load and return the iris dataset (classification).

    The iris dataset is a classic and very easy multi-class classification
    dataset.

    =================   ==============
    Classes                          3
    Samples per class               50
    Samples total                  150
    Dimensionality                   4
    Features            real, positive
    =================   ==============

    Read more in the :ref:`User Guide <iris_dataset>`.

    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object. See
        below for more information about the `data` and `target` objects.

        .. versionadded:: 0.18

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.

        .. versionadded:: 0.23

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (150, 4)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target : {ndarray, Series} of shape (150,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names : list
            The names of the dataset columns.
        target_names : list
            The names of target classes.
        frame : DataFrame of shape (150, 5)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.

            .. versionadded:: 0.23
        DESCR : str
            The full description of the dataset.
        filename : str
            The path to the location of the data.

            .. versionadded:: 0.20

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarrays. The first contains a 2D array of shape
        (n_samples, n_features) with each row representing one sample and
        each column representing the features. The second is an ndarray of
        shape (n_samples,) containing the target samples.

        .. versionadded:: 0.18

    Notes
    -----
    .. versionchanged:: 0.20
        Fixed two wrong data points according to Fisher's paper.
        The new version is the same as in R, but not as in the UCI
        Machine Learning Repository.

    Examples
    --------
    Let's say you are interested in the samples 10, 25, and 50, and want to
    know their class name.

    >>> from sklearn.datasets import load_iris
    >>> data = load_iris()
    >>> data.target[[10, 25, 50]]
    array([0, 0, 1])
    >>> list(data.target_names)
    ['setosa', 'versicolor', 'virginica']

    See :ref:`sphx_glr_auto_examples_datasets_plot_iris_dataset.py` for a more
    detailed example of how to work with the iris dataset.
    """
    data_file_name = "iris.csv"
    data, target, target_names, fdescr = load_csv_data(
        data_file_name=data_file_name, descr_file_name="iris.rst"
    )

    feature_names = [
        "sepal length (cm)",
        "sepal width (cm)",
        "petal length (cm)",
        "petal width (cm)",
    ]

    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "load_iris", data, target, feature_names, target_columns
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        DESCR=fdescr,
        feature_names=feature_names,
        filename=data_file_name,
        data_module=DATA_MODULE,
    )
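

# NOTE (editorial): usage sketch, not upstream code; assumes pandas is
# installed. The frame's columns are the four feature names plus "target".
def _example_load_iris_frame_sketch():
    bunch = load_iris(as_frame=True)
    assert list(bunch.frame.columns[:2]) == ["sepal length (cm)", "sepal width (cm)"]
    assert bunch.frame["target"].nunique() == 3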


@validate_params(
    {"return_X_y": ["boolean"], "as_frame": ["boolean"]},
    prefer_skip_nested_validation=True,
)
def load_breast_cancer(*, return_X_y=False, as_frame=False):
    """Load and return the breast cancer wisconsin dataset (classification).

    The breast cancer dataset is a classic and very easy binary classification
    dataset.

    =================   ==============
    Classes                          2
    Samples per class    212(M),357(B)
    Samples total                  569
    Dimensionality                  30
    Features            real, positive
    =================   ==============

    The copy of the UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
    downloaded from:
    https://archive.ics.uci.edu/dataset/17/breast+cancer+wisconsin+diagnostic

    Read more in the :ref:`User Guide <breast_cancer_dataset>`.

    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` objects.

        .. versionadded:: 0.18

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.

        .. versionadded:: 0.23

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (569, 30)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target : {ndarray, Series} of shape (569,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names : ndarray of shape (30,)
            The names of the dataset columns.
        target_names : ndarray of shape (2,)
            The names of target classes.
        frame : DataFrame of shape (569, 31)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.

            .. versionadded:: 0.23
        DESCR : str
            The full description of the dataset.
        filename : str
            The path to the location of the data.

            .. versionadded:: 0.20

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarrays by default. The first contains a 2D ndarray of
        shape (569, 30) with each row representing one sample and each column
        representing the features. The second ndarray of shape (569,) contains
        the target samples. If `as_frame=True`, both arrays are pandas objects,
        i.e. `X` a dataframe and `y` a series.

        .. versionadded:: 0.18

    Examples
    --------
    Let's say you are interested in the samples 10, 50, and 85, and want to
    know their class name.

    >>> from sklearn.datasets import load_breast_cancer
    >>> data = load_breast_cancer()
    >>> data.target[[10, 50, 85]]
    array([0, 1, 0])
    >>> list(data.target_names)
    ['malignant', 'benign']
    """
    data_file_name = "breast_cancer.csv"
    data, target, target_names, fdescr = load_csv_data(
        data_file_name=data_file_name, descr_file_name="breast_cancer.rst"
    )

    feature_names = np.array(
        [
            "mean radius",
            "mean texture",
            "mean perimeter",
            "mean area",
            "mean smoothness",
            "mean compactness",
            "mean concavity",
            "mean concave points",
            "mean symmetry",
            "mean fractal dimension",
            "radius error",
            "texture error",
            "perimeter error",
            "area error",
            "smoothness error",
            "compactness error",
            "concavity error",
            "concave points error",
            "symmetry error",
            "fractal dimension error",
            "worst radius",
            "worst texture",
            "worst perimeter",
            "worst area",
            "worst smoothness",
            "worst compactness",
            "worst concavity",
            "worst concave points",
            "worst symmetry",
            "worst fractal dimension",
        ]
    )

    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "load_breast_cancer", data, target, feature_names, target_columns
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        DESCR=fdescr,
        feature_names=feature_names,
        filename=data_file_name,
        data_module=DATA_MODULE,
    )


@validate_params(
    {
        "n_class": [Interval(Integral, 1, 10, closed="both")],
        "return_X_y": ["boolean"],
        "as_frame": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def load_digits(*, n_class=10, return_X_y=False, as_frame=False):
    """Load and return the digits dataset (classification).

    Each datapoint is an 8x8 image of a digit.

    =================   ==============
    Classes                         10
    Samples per class             ~180
    Samples total                 1797
    Dimensionality                  64
    Features             integers 0-16
    =================   ==============

    This is a copy of the test set of the UCI ML hand-written digits dataset
    https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits

    Read more in the :ref:`User Guide <digits_dataset>`.

    Parameters
    ----------
    n_class : int, default=10
        The number of classes to return. Between 1 and 10.

    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` objects.

        .. versionadded:: 0.18

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.

        .. versionadded:: 0.23

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (1797, 64)
            The flattened data matrix. If `as_frame=True`, `data` will be
            a pandas DataFrame.
        target : {ndarray, Series} of shape (1797,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names : list
            The names of the dataset columns.
        target_names : list
            The names of target classes.

            .. versionadded:: 0.20

        frame : DataFrame of shape (1797, 65)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.

            .. versionadded:: 0.23
        images : ndarray of shape (1797, 8, 8)
            The raw image data.
        DESCR : str
            The full description of the dataset.

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarrays by default. The first contains a 2D ndarray of
        shape (1797, 64) with each row representing one sample and each column
        representing the features. The second ndarray of shape (1797,) contains
        the target samples. If `as_frame=True`, both arrays are pandas objects,
        i.e. `X` a dataframe and `y` a series.

        .. versionadded:: 0.18

    Examples
    --------
    To load the data and visualize the images::

    >>> from sklearn.datasets import load_digits
    >>> digits = load_digits()
    >>> print(digits.data.shape)
    (1797, 64)
    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.matshow(digits.images[0])
    <...>
    >>> plt.show()
    """
    data, fdescr = load_gzip_compressed_csv_data(
        data_file_name="digits.csv.gz", descr_file_name="digits.rst", delimiter=","
    )

    target = data[:, -1].astype(int, copy=False)
    flat_data = data[:, :-1]
    images = flat_data.view()
    images.shape = (-1, 8, 8)

    if n_class < 10:
        idx = target < n_class
        flat_data, target = flat_data[idx], target[idx]
        images = images[idx]

    feature_names = [
        "pixel_{}_{}".format(row_idx, col_idx)
        for row_idx in range(8)
        for col_idx in range(8)
    ]

    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        frame, flat_data, target = _convert_data_dataframe(
            "load_digits", flat_data, target, feature_names, target_columns
        )

    if return_X_y:
        return flat_data, target

    return Bunch(
        data=flat_data,
        target=target,
        frame=frame,
        feature_names=feature_names,
        target_names=np.arange(10),
        images=images,
        DESCR=fdescr,
    )
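

# NOTE (editorial): a sketch of the `n_class` filtering and the data/images
# relationship, not upstream code. `images` is a reshaped view of `data`, so
# flattening an image recovers the corresponding row.
def _example_load_digits_sketch():
    import numpy as np

    digits = load_digits(n_class=2)  # keep only the labels 0 and 1
    assert set(np.unique(digits.target)) == {0, 1}
    assert np.array_equal(digits.images[0].ravel(), digits.data[0])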


@validate_params(
    {"return_X_y": ["boolean"], "as_frame": ["boolean"], "scaled": ["boolean"]},
    prefer_skip_nested_validation=True,
)
def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
    """Load and return the diabetes dataset (regression).

    ==============   ==================
    Samples total    442
    Dimensionality   10
    Features         real, -.2 < x < .2
    Targets          integer 25 - 346
    ==============   ==================

    .. note::
       The meaning of each feature (i.e. `feature_names`) might be unclear
       (especially for `ltg`) as the documentation of the original dataset is
       not explicit. We provide information that seems correct with regard to
       the scientific literature in this field of research.

    Read more in the :ref:`User Guide <diabetes_dataset>`.

    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` objects.

        .. versionadded:: 0.18

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.

        .. versionadded:: 0.23

    scaled : bool, default=True
        If True, the feature variables are mean centered and scaled by the
        standard deviation times the square root of `n_samples`.
        If False, raw data is returned for the feature variables.

        .. versionadded:: 1.1

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (442, 10)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target : {ndarray, Series} of shape (442,)
            The regression target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names : list
            The names of the dataset columns.
        frame : DataFrame of shape (442, 11)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.

            .. versionadded:: 0.23
        DESCR : str
            The full description of the dataset.
        data_filename : str
            The path to the location of the data.
        target_filename : str
            The path to the location of the target.

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarrays: `data`, of shape (n_samples, n_features),
        with each row representing one sample and each column a feature, and
        `target`, of shape (n_samples,), holding the target value of each
        sample.

        .. versionadded:: 0.18

    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> diabetes = load_diabetes()
    >>> diabetes.target[:3]
    array([151.,  75., 141.])
    >>> diabetes.data.shape
    (442, 10)
    """
    data_filename = "diabetes_data_raw.csv.gz"
    target_filename = "diabetes_target.csv.gz"
    data = load_gzip_compressed_csv_data(data_filename)
    target = load_gzip_compressed_csv_data(target_filename)

    if scaled:
        data = scale(data, copy=False)
        data /= data.shape[0] ** 0.5

    fdescr = load_descr("diabetes.rst")

    feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"]

    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "load_diabetes", data, target, feature_names, target_columns
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        DESCR=fdescr,
        feature_names=feature_names,
        data_filename=data_filename,
        target_filename=target_filename,
        data_module=DATA_MODULE,
    )
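

# NOTE (editorial): a sketch verifying the `scaled` behaviour implemented
# above, not upstream code: the default data equals the raw data after
# mean-centering, unit-variance scaling, and division by sqrt(n_samples).
def _example_load_diabetes_scaling_sketch():
    import numpy as np

    from sklearn.preprocessing import scale

    raw = load_diabetes(scaled=False).data
    scaled_data = load_diabetes().data
    expected = scale(raw) / raw.shape[0] ** 0.5
    assert np.allclose(scaled_data, expected)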


@validate_params(
    {
        "return_X_y": ["boolean"],
        "as_frame": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def load_linnerud(*, return_X_y=False, as_frame=False):
    """Load and return the physical exercise Linnerud dataset.

    This dataset is suitable for multi-output regression tasks.

    ==============   ============================
    Samples total    20
    Dimensionality   3 (for both data and target)
    Features         integer
    Targets          integer
    ==============   ============================

    Read more in the :ref:`User Guide <linnerrud_dataset>`.

    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` objects.

        .. versionadded:: 0.18

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric, string or categorical). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.

        .. versionadded:: 0.23

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (20, 3)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target : {ndarray, dataframe} of shape (20, 3)
            The regression targets. If `as_frame=True`, `target` will be
            a pandas DataFrame.
        feature_names : list
            The names of the dataset columns.
        target_names : list
            The names of the target columns.
        frame : DataFrame of shape (20, 6)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.

            .. versionadded:: 0.23
        DESCR : str
            The full description of the dataset.
        data_filename : str
            The path to the location of the data.
        target_filename : str
            The path to the location of the target.

            .. versionadded:: 0.20

    (data, target) : tuple if ``return_X_y`` is True
        Returns a tuple of two ndarrays or dataframes of shape
        `(20, 3)`. Each row represents one sample and each column represents the
        features in `X` and a target in `y` of a given sample.

        .. versionadded:: 0.18
    """
    data_filename = "linnerud_exercise.csv"
    target_filename = "linnerud_physiological.csv"

    data_module_path = resources.files(DATA_MODULE)
    # Read header and data
    data_path = data_module_path / data_filename
    with data_path.open("r", encoding="utf-8") as f:
        header_exercise = f.readline().split()
        f.seek(0)  # reset file obj
        data_exercise = np.loadtxt(f, skiprows=1)

    target_path = data_module_path / target_filename
    with target_path.open("r", encoding="utf-8") as f:
        header_physiological = f.readline().split()
        f.seek(0)  # reset file obj
        data_physiological = np.loadtxt(f, skiprows=1)

    fdescr = load_descr("linnerud.rst")

    frame = None
    if as_frame:
        (frame, data_exercise, data_physiological) = _convert_data_dataframe(
            "load_linnerud",
            data_exercise,
            data_physiological,
            header_exercise,
            header_physiological,
        )
    if return_X_y:
        return data_exercise, data_physiological

    return Bunch(
        data=data_exercise,
        feature_names=header_exercise,
        target=data_physiological,
        target_names=header_physiological,
        frame=frame,
        DESCR=fdescr,
        data_filename=data_filename,
        target_filename=target_filename,
        data_module=DATA_MODULE,
    )
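

# NOTE (editorial): a multi-output regression sketch, not upstream code.
# `LinearRegression` accepts the (20, 3) target matrix directly.
def _example_load_linnerud_sketch():
    from sklearn.linear_model import LinearRegression

    X, Y = load_linnerud(return_X_y=True)
    model = LinearRegression().fit(X, Y)
    assert model.predict(X).shape == (20, 3)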


def load_sample_images():
    """Load sample images for image manipulation.

    Loads both ``china`` and ``flower``.

    Read more in the :ref:`User Guide <sample_images>`.

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        images : list of ndarray of shape (427, 640, 3)
            The two sample images.
        filenames : list
            The filenames for the images.
        DESCR : str
            The full description of the dataset.

    Examples
    --------
    To load the data and visualize the images:

    >>> from sklearn.datasets import load_sample_images
    >>> dataset = load_sample_images()     #doctest: +SKIP
    >>> len(dataset.images)                #doctest: +SKIP
    2
    >>> first_img_data = dataset.images[0] #doctest: +SKIP
    >>> first_img_data.shape               #doctest: +SKIP
    (427, 640, 3)
    >>> first_img_data.dtype               #doctest: +SKIP
    dtype('uint8')
    """
    try:
        from PIL import Image
    except ImportError:
        raise ImportError(
            "The Python Imaging Library (PIL) is required to load data "
            "from jpeg files. Please refer to "
            "https://pillow.readthedocs.io/en/stable/installation.html "
            "for installing PIL."
        )

    descr = load_descr("README.txt", descr_module=IMAGES_MODULE)

    filenames, images = [], []

    jpg_paths = sorted(
        resource
        for resource in resources.files(IMAGES_MODULE).iterdir()
        if resource.is_file() and resource.match("*.jpg")
    )

    for path in jpg_paths:
        filenames.append(str(path))
        with path.open("rb") as image_file:
            pil_image = Image.open(image_file)
            image = np.asarray(pil_image)
        images.append(image)

    return Bunch(images=images, filenames=filenames, DESCR=descr)


@validate_params(
    {
        "image_name": [StrOptions({"china.jpg", "flower.jpg"})],
    },
    prefer_skip_nested_validation=True,
)
def load_sample_image(image_name):
    """Load the numpy array of a single sample image.

    Read more in the :ref:`User Guide <sample_images>`.

    Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
        The name of the sample image to load.

    Returns
    -------
    img : 3D array
        The image as a numpy array: height x width x color.

    Examples
    --------

    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.dtype                              # doctest: +SKIP
    dtype('uint8')
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
    >>> flower.dtype                             # doctest: +SKIP
    dtype('uint8')
    >>> flower.shape                             # doctest: +SKIP
    (427, 640, 3)
    """
    images = load_sample_images()
    index = None
    for i, filename in enumerate(images.filenames):
        if filename.endswith(image_name):
            index = i
            break
    if index is None:
        raise AttributeError("Cannot find sample image: %s" % image_name)
    return images.images[index]


def _pkl_filepath(*args, **kwargs):
    """Return filename for Python 3 pickles

    args[-1] is expected to be the ".pkl" filename. For compatibility with
    older scikit-learn versions, a suffix is inserted before the extension.

    _pkl_filepath('/path/to/folder', 'filename.pkl') returns
    '/path/to/folder/filename_py3.pkl'

    """
    py3_suffix = kwargs.get("py3_suffix", "_py3")
    basename, ext = splitext(args[-1])
    basename += py3_suffix
    new_args = args[:-1] + (basename + ext,)
    return join(*new_args)


def _sha256(path):
    """Calculate the sha256 hash of the file at path."""
    sha256hash = hashlib.sha256()
    chunk_size = 8192
    with open(path, "rb") as f:
        while True:
            buffer = f.read(chunk_size)
            if not buffer:
                break
            sha256hash.update(buffer)
    return sha256hash.hexdigest()
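

# NOTE (editorial): a sketch showing that the chunked digest above matches a
# one-shot `hashlib.sha256` over the whole file; not upstream code.
def _example_sha256_sketch():
    import hashlib
    import os
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"x" * 100_000)  # larger than one 8192-byte chunk
        path = f.name
    with open(path, "rb") as f:
        expected = hashlib.sha256(f.read()).hexdigest()
    assert _sha256(path) == expected
    os.remove(path)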


def _fetch_remote(remote, dirname=None):
    """Helper function to download a remote dataset.

    Fetch a dataset pointed to by remote's url, save it under remote's
    filename, and ensure its integrity based on the SHA256 checksum of the
    downloaded file.

    Parameters
    ----------
    remote : RemoteFileMetadata
        Named tuple containing remote dataset meta information: url, filename
        and checksum.

    dirname : str, default=None
        Directory to save the file to.

    Returns
    -------
    file_path : str
        Full path of the created file.
    """
    file_path = remote.filename if dirname is None else join(dirname, remote.filename)
    urlretrieve(remote.url, file_path)
    checksum = _sha256(file_path)
    if remote.checksum != checksum:
        raise OSError(
            "{} has an SHA256 checksum ({}) "
            "differing from expected ({}), "
            "file may be corrupted.".format(file_path, checksum, remote.checksum)
        )
    return file_path
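

# NOTE (editorial): a construction sketch, not upstream code. The
# `RemoteFileMetadata` namedtuple is defined earlier in this module (the
# dataset fetchers below import it from `._base`). The URL and checksum here
# are placeholders, so the download call itself is left commented out.
def _example_fetch_remote_sketch():
    demo = RemoteFileMetadata(
        filename="demo.csv",
        url="https://example.com/demo.csv",  # placeholder URL
        checksum="0" * 64,  # placeholder SHA256 digest
    )
    # file_path = _fetch_remote(demo, dirname="/tmp")  # would download + verify
    assert demo.checksum == "0" * 64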
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py
ADDED
@@ -0,0 +1,223 @@
"""California housing dataset.
|
2 |
+
|
3 |
+
The original database is available from StatLib
|
4 |
+
|
5 |
+
http://lib.stat.cmu.edu/datasets/
|
6 |
+
|
7 |
+
The data contains 20,640 observations on 9 variables.
|
8 |
+
|
9 |
+
This dataset contains the average house value as target variable
|
10 |
+
and the following input variables (features): average income,
|
11 |
+
housing average age, average rooms, average bedrooms, population,
|
12 |
+
average occupation, latitude, and longitude in that order.
|
13 |
+
|
14 |
+
References
|
15 |
+
----------
|
16 |
+
|
17 |
+
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
|
18 |
+
Statistics and Probability Letters, 33 (1997) 291-297.
|
19 |
+
|
20 |
+
"""
|
21 |
+
# Authors: Peter Prettenhofer
|
22 |
+
# License: BSD 3 clause
|
23 |
+
|
24 |
+
import logging
|
25 |
+
import tarfile
|
26 |
+
from os import PathLike, makedirs, remove
|
27 |
+
from os.path import exists
|
28 |
+
|
29 |
+
import joblib
|
30 |
+
import numpy as np
|
31 |
+
|
32 |
+
from ..utils import Bunch
|
33 |
+
from ..utils._param_validation import validate_params
|
34 |
+
from . import get_data_home
|
35 |
+
from ._base import (
|
36 |
+
RemoteFileMetadata,
|
37 |
+
_convert_data_dataframe,
|
38 |
+
_fetch_remote,
|
39 |
+
_pkl_filepath,
|
40 |
+
load_descr,
|
41 |
+
)
|
42 |
+
|
43 |
+
# The original data can be found at:
|
44 |
+
# https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz
|
45 |
+
ARCHIVE = RemoteFileMetadata(
|
46 |
+
filename="cal_housing.tgz",
|
47 |
+
url="https://ndownloader.figshare.com/files/5976036",
|
48 |
+
checksum="aaa5c9a6afe2225cc2aed2723682ae403280c4a3695a2ddda4ffb5d8215ea681",
|
49 |
+
)
|
50 |
+
|
51 |
+
logger = logging.getLogger(__name__)
|
52 |
+
|
53 |
+
|
54 |
+
@validate_params(
|
55 |
+
{
|
56 |
+
"data_home": [str, PathLike, None],
|
57 |
+
"download_if_missing": ["boolean"],
|
58 |
+
"return_X_y": ["boolean"],
|
59 |
+
"as_frame": ["boolean"],
|
60 |
+
},
|
61 |
+
prefer_skip_nested_validation=True,
|
62 |
+
)
|
63 |
+
def fetch_california_housing(
|
64 |
+
*, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False
|
65 |
+
):
|
66 |
+
"""Load the California housing dataset (regression).
|
67 |
+
|
68 |
+
============== ==============
|
69 |
+
Samples total 20640
|
70 |
+
Dimensionality 8
|
71 |
+
Features real
|
72 |
+
Target real 0.15 - 5.
|
73 |
+
============== ==============
|
74 |
+
|
75 |
+
Read more in the :ref:`User Guide <california_housing_dataset>`.
|
76 |
+
|
77 |
+
Parameters
|
78 |
+
----------
|
79 |
+
data_home : str or path-like, default=None
|
80 |
+
Specify another download and cache folder for the datasets. By default
|
81 |
+
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
|
82 |
+
|
83 |
+
download_if_missing : bool, default=True
|
84 |
+
If False, raise an OSError if the data is not locally available
|
85 |
+
instead of trying to download the data from the source site.
|
86 |
+
|
87 |
+
return_X_y : bool, default=False
|
88 |
+
If True, returns ``(data.data, data.target)`` instead of a Bunch
|
89 |
+
object.
|
90 |
+
|
91 |
+
.. versionadded:: 0.20
|
92 |
+
|
93 |
+
as_frame : bool, default=False
|
94 |
+
If True, the data is a pandas DataFrame including columns with
|
95 |
+
appropriate dtypes (numeric, string or categorical). The target is
|
96 |
+
a pandas DataFrame or Series depending on the number of target_columns.
|
97 |
+
|
98 |
+
.. versionadded:: 0.23
|
99 |
+
|
100 |
+
Returns
|
101 |
+
-------
|
102 |
+
dataset : :class:`~sklearn.utils.Bunch`
|
103 |
+
Dictionary-like object, with the following attributes.
|
104 |
+
|
105 |
+
data : ndarray, shape (20640, 8)
|
106 |
+
Each row corresponding to the 8 feature values in order.
|
107 |
+
If ``as_frame`` is True, ``data`` is a pandas object.
|
108 |
+
target : numpy array of shape (20640,)
|
109 |
+
Each value corresponds to the average
|
110 |
+
house value in units of 100,000.
|
111 |
+
If ``as_frame`` is True, ``target`` is a pandas object.
|
112 |
+
feature_names : list of length 8
|
113 |
+
Array of ordered feature names used in the dataset.
|
114 |
+
DESCR : str
|
115 |
+
Description of the California housing dataset.
|
116 |
+
frame : pandas DataFrame
|
117 |
+
Only present when `as_frame=True`. DataFrame with ``data`` and
|
118 |
+
``target``.
|
119 |
+
|
120 |
+
.. versionadded:: 0.23
|
121 |
+
|
122 |
+
(data, target) : tuple if ``return_X_y`` is True
|
123 |
+
A tuple of two ndarray. The first containing a 2D array of
|
124 |
+
shape (n_samples, n_features) with each row representing one
|
125 |
+
sample and each column representing the features. The second
|
126 |
+
ndarray of shape (n_samples,) containing the target samples.
|
127 |
+
|
128 |
+
.. versionadded:: 0.20
|
129 |
+
|
130 |
+
Notes
|
131 |
+
-----
|
132 |
+
|
133 |
+
This dataset consists of 20,640 samples and 9 features.
|
134 |
+
|
135 |
+
Examples
|
136 |
+
--------
|
137 |
+
>>> from sklearn.datasets import fetch_california_housing
|
138 |
+
>>> housing = fetch_california_housing()
|
139 |
+
>>> print(housing.data.shape, housing.target.shape)
|
140 |
+
(20640, 8) (20640,)
|
141 |
+
>>> print(housing.feature_names[0:6])
|
142 |
+
['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup']
|
143 |
+
"""
|
144 |
+
data_home = get_data_home(data_home=data_home)
|
145 |
+
if not exists(data_home):
|
146 |
+
makedirs(data_home)
|
147 |
+
|
148 |
+
filepath = _pkl_filepath(data_home, "cal_housing.pkz")
|
149 |
+
if not exists(filepath):
|
150 |
+
if not download_if_missing:
|
151 |
+
raise OSError("Data not found and `download_if_missing` is False")
|
152 |
+
|
153 |
+
logger.info(
|
154 |
+
"Downloading Cal. housing from {} to {}".format(ARCHIVE.url, data_home)
|
155 |
+
)
|
156 |
+
|
157 |
+
archive_path = _fetch_remote(ARCHIVE, dirname=data_home)
|
158 |
+
|
159 |
+
with tarfile.open(mode="r:gz", name=archive_path) as f:
|
160 |
+
cal_housing = np.loadtxt(
|
161 |
+
f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter=","
|
162 |
+
)
|
163 |
+
# Columns are not in the same order compared to the previous
|
164 |
+
# URL resource on lib.stat.cmu.edu
|
165 |
+
columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]
|
166 |
+
cal_housing = cal_housing[:, columns_index]
|
167 |
+
|
168 |
+
joblib.dump(cal_housing, filepath, compress=6)
|
169 |
+
remove(archive_path)
|
170 |
+
|
171 |
+
else:
|
172 |
+
cal_housing = joblib.load(filepath)
|
173 |
+
|
174 |
+
feature_names = [
|
175 |
+
"MedInc",
|
176 |
+
"HouseAge",
|
177 |
+
"AveRooms",
|
178 |
+
"AveBedrms",
|
179 |
+
"Population",
|
180 |
+
"AveOccup",
|
181 |
+
"Latitude",
|
182 |
+
"Longitude",
|
183 |
+
]
|
184 |
+
|
185 |
+
target, data = cal_housing[:, 0], cal_housing[:, 1:]
|
186 |
+
|
187 |
+
# avg rooms = total rooms / households
|
188 |
+
data[:, 2] /= data[:, 5]
|
189 |
+
|
190 |
+
# avg bed rooms = total bed rooms / households
|
191 |
+
data[:, 3] /= data[:, 5]
|
192 |
+
|
193 |
+
# avg occupancy = population / households
|
194 |
+
data[:, 5] = data[:, 4] / data[:, 5]
|
195 |
+
|
196 |
+
# target in units of 100,000
|
197 |
+
target = target / 100000.0
|
198 |
+
|
199 |
+
descr = load_descr("california_housing.rst")
|
200 |
+
|
201 |
+
X = data
|
202 |
+
y = target
|
203 |
+
|
204 |
+
frame = None
|
205 |
+
target_names = [
|
206 |
+
"MedHouseVal",
|
207 |
+
]
|
208 |
+
if as_frame:
|
209 |
+
frame, X, y = _convert_data_dataframe(
|
210 |
+
"fetch_california_housing", data, target, feature_names, target_names
|
211 |
+
)
|
212 |
+
|
213 |
+
if return_X_y:
|
214 |
+
return X, y
|
215 |
+
|
216 |
+
return Bunch(
|
217 |
+
data=X,
|
218 |
+
target=y,
|
219 |
+
frame=frame,
|
220 |
+
target_names=target_names,
|
221 |
+
feature_names=feature_names,
|
222 |
+
DESCR=descr,
|
223 |
+
)
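

# NOTE (editorial): a usage sketch, not upstream code; `as_frame=True`
# assumes pandas is installed and the archive is already cached (or a network
# connection is available). The per-household averages are derived above from
# the raw totals, and the target is expressed in units of $100,000.
def _example_fetch_california_housing_sketch():
    housing = fetch_california_housing(as_frame=True)
    frame = housing.frame
    assert frame.shape == (20640, 9)  # 8 features + MedHouseVal
    assert frame["MedHouseVal"].max() < 5.1  # target capped near 5
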
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_covtype.py
ADDED
@@ -0,0 +1,236 @@
"""Forest covertype dataset.
|
2 |
+
|
3 |
+
A classic dataset for classification benchmarks, featuring categorical and
|
4 |
+
real-valued features.
|
5 |
+
|
6 |
+
The dataset page is available from UCI Machine Learning Repository
|
7 |
+
|
8 |
+
https://archive.ics.uci.edu/ml/datasets/Covertype
|
9 |
+
|
10 |
+
Courtesy of Jock A. Blackard and Colorado State University.
|
11 |
+
"""
|
12 |
+
|
13 |
+
# Author: Lars Buitinck
|
14 |
+
# Peter Prettenhofer <[email protected]>
|
15 |
+
# License: BSD 3 clause
|
16 |
+
|
17 |
+
import logging
|
18 |
+
import os
|
19 |
+
from gzip import GzipFile
|
20 |
+
from os.path import exists, join
|
21 |
+
from tempfile import TemporaryDirectory
|
22 |
+
|
23 |
+
import joblib
|
24 |
+
import numpy as np
|
25 |
+
|
26 |
+
from ..utils import Bunch, check_random_state
|
27 |
+
from ..utils._param_validation import validate_params
|
28 |
+
from . import get_data_home
|
29 |
+
from ._base import (
|
30 |
+
RemoteFileMetadata,
|
31 |
+
_convert_data_dataframe,
|
32 |
+
_fetch_remote,
|
33 |
+
_pkl_filepath,
|
34 |
+
load_descr,
|
35 |
+
)
|
36 |
+
|
37 |
+
# The original data can be found in:
|
38 |
+
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
|
39 |
+
ARCHIVE = RemoteFileMetadata(
|
40 |
+
filename="covtype.data.gz",
|
41 |
+
url="https://ndownloader.figshare.com/files/5976039",
|
42 |
+
checksum="614360d0257557dd1792834a85a1cdebfadc3c4f30b011d56afee7ffb5b15771",
|
43 |
+
)
|
44 |
+
|
45 |
+
logger = logging.getLogger(__name__)
|
46 |
+
|
47 |
+
# Column names reference:
|
48 |
+
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info
|
49 |
+
FEATURE_NAMES = [
|
50 |
+
"Elevation",
|
51 |
+
"Aspect",
|
52 |
+
"Slope",
|
53 |
+
"Horizontal_Distance_To_Hydrology",
|
54 |
+
"Vertical_Distance_To_Hydrology",
|
55 |
+
"Horizontal_Distance_To_Roadways",
|
56 |
+
"Hillshade_9am",
|
57 |
+
"Hillshade_Noon",
|
58 |
+
"Hillshade_3pm",
|
59 |
+
"Horizontal_Distance_To_Fire_Points",
|
60 |
+
]
|
61 |
+
FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)]
|
62 |
+
FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)]
|
63 |
+
TARGET_NAMES = ["Cover_Type"]
|
64 |
+
|
65 |
+
|
66 |
+
@validate_params(
|
67 |
+
{
|
68 |
+
"data_home": [str, os.PathLike, None],
|
69 |
+
"download_if_missing": ["boolean"],
|
70 |
+
"random_state": ["random_state"],
|
71 |
+
"shuffle": ["boolean"],
|
72 |
+
"return_X_y": ["boolean"],
|
73 |
+
"as_frame": ["boolean"],
|
74 |
+
},
|
75 |
+
prefer_skip_nested_validation=True,
|
76 |
+
)
|
77 |
+
def fetch_covtype(
|
78 |
+
*,
|
79 |
+
data_home=None,
|
80 |
+
download_if_missing=True,
|
81 |
+
random_state=None,
|
82 |
+
shuffle=False,
|
83 |
+
return_X_y=False,
|
84 |
+
as_frame=False,
|
85 |
+
):
|
86 |
+
"""Load the covertype dataset (classification).
|
87 |
+
|
88 |
+
Download it if necessary.
|
89 |
+
|
90 |
+
================= ============
|
91 |
+
Classes 7
|
92 |
+
Samples total 581012
|
93 |
+
Dimensionality 54
|
94 |
+
Features int
|
95 |
+
================= ============
|
96 |
+
|
97 |
+
Read more in the :ref:`User Guide <covtype_dataset>`.
|
98 |
+
|
99 |
+
Parameters
|
100 |
+
----------
|
101 |
+
data_home : str or path-like, default=None
|
102 |
+
Specify another download and cache folder for the datasets. By default
|
103 |
+
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
|
104 |
+
|
105 |
+
download_if_missing : bool, default=True
|
106 |
+
If False, raise an OSError if the data is not locally available
|
107 |
+
instead of trying to download the data from the source site.
|
108 |
+
|
109 |
+
random_state : int, RandomState instance or None, default=None
|
110 |
+
Determines random number generation for dataset shuffling. Pass an int
|
111 |
+
for reproducible output across multiple function calls.
|
112 |
+
See :term:`Glossary <random_state>`.
|
113 |
+
|
114 |
+
shuffle : bool, default=False
|
115 |
+
Whether to shuffle dataset.
|
116 |
+
|
117 |
+
return_X_y : bool, default=False
|
118 |
+
If True, returns ``(data.data, data.target)`` instead of a Bunch
|
119 |
+
object.
|
120 |
+
|
121 |
+
.. versionadded:: 0.20
|
122 |
+
|
123 |
+
as_frame : bool, default=False
|
124 |
+
If True, the data is a pandas DataFrame including columns with
|
125 |
+
appropriate dtypes (numeric). The target is a pandas DataFrame or
|
126 |
+
Series depending on the number of target columns. If `return_X_y` is
|
127 |
+
True, then (`data`, `target`) will be pandas DataFrames or Series as
|
128 |
+
described below.
|
129 |
+
|
130 |
+
.. versionadded:: 0.24
|
131 |
+
|
132 |
+
Returns
|
133 |
+
-------
|
134 |
+
dataset : :class:`~sklearn.utils.Bunch`
|
135 |
+
Dictionary-like object, with the following attributes.
|
136 |
+
|
137 |
+
data : ndarray of shape (581012, 54)
|
138 |
+
Each row corresponds to the 54 features in the dataset.
|
139 |
+
target : ndarray of shape (581012,)
|
140 |
+
Each value corresponds to one of
|
141 |
+
the 7 forest covertypes with values
|
142 |
+
ranging between 1 to 7.
|
143 |
+
frame : dataframe of shape (581012, 55)
|
144 |
+
Only present when `as_frame=True`. Contains `data` and `target`.
|
145 |
+
DESCR : str
|
146 |
+
Description of the forest covertype dataset.
|
147 |
+
feature_names : list
|
148 |
+
The names of the dataset columns.
|
149 |
+
target_names: list
|
150 |
+
The names of the target columns.
|
151 |
+
|
152 |
+
(data, target) : tuple if ``return_X_y`` is True
|
153 |
+
A tuple of two ndarray. The first containing a 2D array of
|
154 |
+
shape (n_samples, n_features) with each row representing one
|
155 |
+
sample and each column representing the features. The second
|
156 |
+
ndarray of shape (n_samples,) containing the target samples.
|
157 |
+
|
158 |
+
.. versionadded:: 0.20
|
159 |
+
|
160 |
+
Examples
|
161 |
+
--------
|
162 |
+
>>> from sklearn.datasets import fetch_covtype
|
163 |
+
>>> cov_type = fetch_covtype()
|
164 |
+
>>> cov_type.data.shape
|
165 |
+
(581012, 54)
|
166 |
+
>>> cov_type.target.shape
|
167 |
+
(581012,)
|
168 |
+
>>> # Let's check the 4 first feature names
|
169 |
+
>>> cov_type.feature_names[:4]
|
170 |
+
['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology']
|
171 |
+
"""
|
172 |
+
data_home = get_data_home(data_home=data_home)
|
173 |
+
covtype_dir = join(data_home, "covertype")
|
174 |
+
samples_path = _pkl_filepath(covtype_dir, "samples")
|
175 |
+
targets_path = _pkl_filepath(covtype_dir, "targets")
|
176 |
+
available = exists(samples_path) and exists(targets_path)
|
177 |
+
|
178 |
+
if download_if_missing and not available:
|
179 |
+
os.makedirs(covtype_dir, exist_ok=True)
|
180 |
+
|
181 |
+
# Creating temp_dir as a direct subdirectory of the target directory
|
182 |
+
# guarantees that both reside on the same filesystem, so that we can use
|
183 |
+
# os.rename to atomically move the data files to their target location.
|
184 |
+
with TemporaryDirectory(dir=covtype_dir) as temp_dir:
|
185 |
+
logger.info(f"Downloading {ARCHIVE.url}")
|
186 |
+
archive_path = _fetch_remote(ARCHIVE, dirname=temp_dir)
|
187 |
+
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",")
|
188 |
+
|
189 |
+
X = Xy[:, :-1]
|
190 |
+
y = Xy[:, -1].astype(np.int32, copy=False)
|
191 |
+
|
192 |
+
samples_tmp_path = _pkl_filepath(temp_dir, "samples")
|
193 |
+
joblib.dump(X, samples_tmp_path, compress=9)
|
194 |
+
os.rename(samples_tmp_path, samples_path)
|
195 |
+
|
196 |
+
targets_tmp_path = _pkl_filepath(temp_dir, "targets")
|
197 |
+
joblib.dump(y, targets_tmp_path, compress=9)
|
198 |
+
os.rename(targets_tmp_path, targets_path)
|
199 |
+
|
200 |
+
elif not available and not download_if_missing:
|
201 |
+
raise OSError("Data not found and `download_if_missing` is False")
|
202 |
+
try:
|
203 |
+
X, y
|
204 |
+
except NameError:
|
205 |
+
X = joblib.load(samples_path)
|
206 |
+
y = joblib.load(targets_path)
|
207 |
+
|
208 |
+
if shuffle:
|
209 |
+
ind = np.arange(X.shape[0])
|
210 |
+
rng = check_random_state(random_state)
|
211 |
+
rng.shuffle(ind)
|
212 |
+
X = X[ind]
|
213 |
+
y = y[ind]
|
214 |
+
|
215 |
+
fdescr = load_descr("covtype.rst")
|
216 |
+
|
217 |
+
frame = None
|
218 |
+
if as_frame:
|
219 |
+
frame, X, y = _convert_data_dataframe(
|
220 |
+
caller_name="fetch_covtype",
|
221 |
+
data=X,
|
222 |
+
target=y,
|
223 |
+
feature_names=FEATURE_NAMES,
|
224 |
+
target_names=TARGET_NAMES,
|
225 |
+
)
|
226 |
+
if return_X_y:
|
227 |
+
return X, y
|
228 |
+
|
229 |
+
return Bunch(
|
230 |
+
data=X,
|
231 |
+
target=y,
|
232 |
+
frame=frame,
|
233 |
+
target_names=TARGET_NAMES,
|
234 |
+
feature_names=FEATURE_NAMES,
|
235 |
+
DESCR=fdescr,
|
236 |
+
)
|
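
The download branch above writes each pickle into a temporary directory created inside the target directory, then moves it with os.rename: because both paths sit on the same filesystem, the rename is atomic and a concurrent reader can never observe a half-written cache file. A minimal standalone sketch of that pattern (the cache_atomically helper and its payload are illustrative, not part of scikit-learn):

    import os
    from tempfile import TemporaryDirectory

    def cache_atomically(payload: bytes, final_path: str) -> None:
        # Hypothetical helper mirroring the pattern used by fetch_covtype:
        # write into a temp dir that lives next to the final file, then
        # os.rename() so the destination appears all at once or not at all.
        target_dir = os.path.dirname(final_path)
        os.makedirs(target_dir, exist_ok=True)
        with TemporaryDirectory(dir=target_dir) as temp_dir:
            tmp_path = os.path.join(temp_dir, "partial")
            with open(tmp_path, "wb") as f:
                f.write(payload)
            # atomic because tmp_path and final_path share a filesystem
            os.rename(tmp_path, final_path)

    cache_atomically(b"example payload", "/tmp/covtype_cache/samples")
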
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py
ADDED
@@ -0,0 +1,401 @@
"""KDDCUP 99 dataset.

A classic dataset for anomaly detection.

The dataset page is available from UCI Machine Learning Repository

https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz

"""

import errno
import logging
import os
from gzip import GzipFile
from os.path import exists, join

import joblib
import numpy as np

from ..utils import Bunch, check_random_state
from ..utils import shuffle as shuffle_method
from ..utils._param_validation import StrOptions, validate_params
from . import get_data_home
from ._base import (
    RemoteFileMetadata,
    _convert_data_dataframe,
    _fetch_remote,
    load_descr,
)

# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
ARCHIVE = RemoteFileMetadata(
    filename="kddcup99_data",
    url="https://ndownloader.figshare.com/files/5976045",
    checksum="3b6c942aa0356c0ca35b7b595a26c89d343652c9db428893e7494f837b274292",
)

# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz
ARCHIVE_10_PERCENT = RemoteFileMetadata(
    filename="kddcup99_10_data",
    url="https://ndownloader.figshare.com/files/5976042",
    checksum="8045aca0d84e70e622d1148d7df782496f6333bf6eb979a1b0837c42a9fd9561",
)

logger = logging.getLogger(__name__)


@validate_params(
    {
        "subset": [StrOptions({"SA", "SF", "http", "smtp"}), None],
        "data_home": [str, os.PathLike, None],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
        "percent10": ["boolean"],
        "download_if_missing": ["boolean"],
        "return_X_y": ["boolean"],
        "as_frame": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_kddcup99(
    *,
    subset=None,
    data_home=None,
    shuffle=False,
    random_state=None,
    percent10=True,
    download_if_missing=True,
    return_X_y=False,
    as_frame=False,
):
    """Load the kddcup99 dataset (classification).

    Download it if necessary.

    ================= ====================================
    Classes                                             23
    Samples total                                  4898431
    Dimensionality                                      41
    Features          discrete (int) or continuous (float)
    ================= ====================================

    Read more in the :ref:`User Guide <kddcup99_dataset>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    subset : {'SA', 'SF', 'http', 'smtp'}, default=None
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

        .. versionadded:: 0.19

    shuffle : bool, default=False
        Whether to shuffle dataset.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and for
        selection of abnormal samples if `subset='SA'`. Pass an int for
        reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object. See
        below for more information about the `data` and `target` object.

        .. versionadded:: 0.20

    as_frame : bool, default=False
        If `True`, returns a pandas DataFrame for the ``data`` and ``target``
        objects in the `Bunch` returned object; the `Bunch` object will also
        have a ``frame`` member.

        .. versionadded:: 0.24

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (494021, 41)
            The data matrix to learn. If `as_frame=True`, `data` will be a
            pandas DataFrame.
        target : {ndarray, series} of shape (494021,)
            The classification target for each sample. If `as_frame=True`,
            `target` will be a pandas Series.
        frame : dataframe of shape (494021, 42)
            Only present when `as_frame=True`. Contains `data` and `target`.
        DESCR : str
            The full description of the dataset.
        feature_names : list
            The names of the dataset columns.
        target_names : list
            The names of the target columns.

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarrays. The first containing a 2D array of
        shape (n_samples, n_features) with each row representing one
        sample and each column representing the features. The second
        ndarray of shape (n_samples,) containing the target samples.

        .. versionadded:: 0.20
    """
    data_home = get_data_home(data_home=data_home)
    kddcup99 = _fetch_brute_kddcup99(
        data_home=data_home,
        percent10=percent10,
        download_if_missing=download_if_missing,
    )

    data = kddcup99.data
    target = kddcup99.target
    feature_names = kddcup99.feature_names
    target_names = kddcup99.target_names

    if subset == "SA":
        s = target == b"normal."
        t = np.logical_not(s)
        normal_samples = data[s, :]
        normal_targets = target[s]
        abnormal_samples = data[t, :]
        abnormal_targets = target[t]

        n_samples_abnormal = abnormal_samples.shape[0]
        # selected abnormal samples:
        random_state = check_random_state(random_state)
        r = random_state.randint(0, n_samples_abnormal, 3377)
        abnormal_samples = abnormal_samples[r]
        abnormal_targets = abnormal_targets[r]

        data = np.r_[normal_samples, abnormal_samples]
        target = np.r_[normal_targets, abnormal_targets]

    if subset == "SF" or subset == "http" or subset == "smtp":
        # select all samples with positive logged_in attribute:
        s = data[:, 11] == 1
        data = np.c_[data[s, :11], data[s, 12:]]
        feature_names = feature_names[:11] + feature_names[12:]
        target = target[s]

        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))

        if subset == "http":
            s = data[:, 2] == b"http"
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
            feature_names = [feature_names[0], feature_names[4], feature_names[5]]

        if subset == "smtp":
            s = data[:, 2] == b"smtp"
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
            feature_names = [feature_names[0], feature_names[4], feature_names[5]]

        if subset == "SF":
            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
            feature_names = [
                feature_names[0],
                feature_names[2],
                feature_names[4],
                feature_names[5],
            ]

    if shuffle:
        data, target = shuffle_method(data, target, random_state=random_state)

    fdescr = load_descr("kddcup99.rst")

    frame = None
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "fetch_kddcup99", data, target, feature_names, target_names
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        feature_names=feature_names,
        DESCR=fdescr,
    )


def _fetch_brute_kddcup99(data_home=None, download_if_missing=True, percent10=True):
    """Load the kddcup99 dataset, downloading it if necessary.

    Parameters
    ----------
    data_home : str, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    Returns
    -------
    dataset : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : ndarray of shape (494021, 41)
            Each row corresponds to the 41 features in the dataset.
        target : ndarray of shape (494021,)
            Each value corresponds to one of the 21 attack types or to the
            label 'normal.'.
        feature_names : list
            The names of the dataset columns.
        target_names : list
            The names of the target columns.
        DESCR : str
            Description of the kddcup99 dataset.

    """

    data_home = get_data_home(data_home=data_home)
    dir_suffix = "-py3"

    if percent10:
        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
        archive = ARCHIVE_10_PERCENT
    else:
        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
        archive = ARCHIVE

    samples_path = join(kddcup_dir, "samples")
    targets_path = join(kddcup_dir, "targets")
    available = exists(samples_path)

    dt = [
        ("duration", int),
        ("protocol_type", "S4"),
        ("service", "S11"),
        ("flag", "S6"),
        ("src_bytes", int),
        ("dst_bytes", int),
        ("land", int),
        ("wrong_fragment", int),
        ("urgent", int),
        ("hot", int),
        ("num_failed_logins", int),
        ("logged_in", int),
        ("num_compromised", int),
        ("root_shell", int),
        ("su_attempted", int),
        ("num_root", int),
        ("num_file_creations", int),
        ("num_shells", int),
        ("num_access_files", int),
        ("num_outbound_cmds", int),
        ("is_host_login", int),
        ("is_guest_login", int),
        ("count", int),
        ("srv_count", int),
        ("serror_rate", float),
        ("srv_serror_rate", float),
        ("rerror_rate", float),
        ("srv_rerror_rate", float),
        ("same_srv_rate", float),
        ("diff_srv_rate", float),
        ("srv_diff_host_rate", float),
        ("dst_host_count", int),
        ("dst_host_srv_count", int),
        ("dst_host_same_srv_rate", float),
        ("dst_host_diff_srv_rate", float),
        ("dst_host_same_src_port_rate", float),
        ("dst_host_srv_diff_host_rate", float),
        ("dst_host_serror_rate", float),
        ("dst_host_srv_serror_rate", float),
        ("dst_host_rerror_rate", float),
        ("dst_host_srv_rerror_rate", float),
        ("labels", "S16"),
    ]

    column_names = [c[0] for c in dt]
    target_names = column_names[-1]
    feature_names = column_names[:-1]

    if available:
        try:
            X = joblib.load(samples_path)
            y = joblib.load(targets_path)
        except Exception as e:
            raise OSError(
                "The cache for fetch_kddcup99 is invalid, please delete "
                f"{str(kddcup_dir)} and run fetch_kddcup99 again"
            ) from e

    elif download_if_missing:
        _mkdirp(kddcup_dir)
        logger.info("Downloading %s" % archive.url)
        _fetch_remote(archive, dirname=kddcup_dir)
        DT = np.dtype(dt)
        logger.debug("extracting archive")
        archive_path = join(kddcup_dir, archive.filename)
        file_ = GzipFile(filename=archive_path, mode="r")
        Xy = []
        for line in file_.readlines():
            line = line.decode()
            Xy.append(line.replace("\n", "").split(","))
        file_.close()
        logger.debug("extraction done")
        os.remove(archive_path)

        Xy = np.asarray(Xy, dtype=object)
        for j in range(42):
            Xy[:, j] = Xy[:, j].astype(DT[j])

        X = Xy[:, :-1]
        y = Xy[:, -1]
        # XXX bug when compress!=0:
        # (error: 'Incorrect data length while decompressing[...] the file
        # could be corrupted.')

        joblib.dump(X, samples_path, compress=0)
        joblib.dump(y, targets_path, compress=0)
    else:
        raise OSError("Data not found and `download_if_missing` is False")

    return Bunch(
        data=X,
        target=y,
        feature_names=feature_names,
        target_names=[target_names],
    )


def _mkdirp(d):
    """Ensure directory d exists (like mkdir -p on Unix)
    No guarantee that the directory is writable.
    """
    try:
        os.makedirs(d)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
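
A short usage sketch for the public entry point defined above; the first call downloads the archive, and the quoted shape assumes the default 10-percent variant with subset="SA" (97278 normal connections plus 3377 randomly sampled attacks):

    from sklearn.datasets import fetch_kddcup99

    # The SA subset keeps all normal traffic plus a small random sample of
    # attacks, which makes it a common anomaly-detection benchmark.
    sa = fetch_kddcup99(subset="SA", percent10=True, random_state=0)
    print(sa.data.shape)    # (100655, 41)
    print(sa.target_names)  # ['labels']
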
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_lfw.py
ADDED
@@ -0,0 +1,570 @@
"""Labeled Faces in the Wild (LFW) dataset

This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:

http://vis-www.cs.umass.edu/lfw/
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause

import logging
from numbers import Integral, Real
from os import PathLike, listdir, makedirs, remove
from os.path import exists, isdir, join

import numpy as np
from joblib import Memory

from ..utils import Bunch
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
from ._base import (
    RemoteFileMetadata,
    _fetch_remote,
    get_data_home,
    load_descr,
)

logger = logging.getLogger(__name__)

# The original data can be found in:
# http://vis-www.cs.umass.edu/lfw/lfw.tgz
ARCHIVE = RemoteFileMetadata(
    filename="lfw.tgz",
    url="https://ndownloader.figshare.com/files/5976018",
    checksum="055f7d9c632d7370e6fb4afc7468d40f970c34a80d4c6f50ffec63f5a8d536c0",
)

# The original funneled data can be found in:
# http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz
FUNNELED_ARCHIVE = RemoteFileMetadata(
    filename="lfw-funneled.tgz",
    url="https://ndownloader.figshare.com/files/5976015",
    checksum="b47c8422c8cded889dc5a13418c4bc2abbda121092b3533a83306f90d900100a",
)

# The original target data can be found in:
# http://vis-www.cs.umass.edu/lfw/pairsDevTrain.txt',
# http://vis-www.cs.umass.edu/lfw/pairsDevTest.txt',
# http://vis-www.cs.umass.edu/lfw/pairs.txt',
TARGETS = (
    RemoteFileMetadata(
        filename="pairsDevTrain.txt",
        url="https://ndownloader.figshare.com/files/5976012",
        checksum="1d454dada7dfeca0e7eab6f65dc4e97a6312d44cf142207be28d688be92aabfa",
    ),
    RemoteFileMetadata(
        filename="pairsDevTest.txt",
        url="https://ndownloader.figshare.com/files/5976009",
        checksum="7cb06600ea8b2814ac26e946201cdb304296262aad67d046a16a7ec85d0ff87c",
    ),
    RemoteFileMetadata(
        filename="pairs.txt",
        url="https://ndownloader.figshare.com/files/5976006",
        checksum="ea42330c62c92989f9d7c03237ed5d591365e89b3e649747777b70e692dc1592",
    ),
)


#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#


def _check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data"""

    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if not exists(lfw_home):
        makedirs(lfw_home)

    for target in TARGETS:
        target_filepath = join(lfw_home, target.filename)
        if not exists(target_filepath):
            if download_if_missing:
                logger.info("Downloading LFW metadata: %s", target.url)
                _fetch_remote(target, dirname=lfw_home)
            else:
                raise OSError("%s is missing" % target_filepath)

    if funneled:
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive = FUNNELED_ARCHIVE
    else:
        data_folder_path = join(lfw_home, "lfw")
        archive = ARCHIVE

    if not exists(data_folder_path):
        archive_path = join(lfw_home, archive.filename)
        if not exists(archive_path):
            if download_if_missing:
                logger.info("Downloading LFW data (~200MB): %s", archive.url)
                _fetch_remote(archive, dirname=lfw_home)
            else:
                raise OSError("%s is missing" % archive_path)

        import tarfile

        logger.debug("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        remove(archive_path)

    return lfw_home, data_folder_path


def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images"""
    try:
        from PIL import Image
    except ImportError:
        raise ImportError(
            "The Python Imaging Library (PIL) is required to load data "
            "from jpeg files. Please refer to "
            "https://pillow.readthedocs.io/en/stable/installation.html "
            "for installing PIL."
        )

    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.debug("Loading face #%05d / %05d", i + 1, n_faces)

        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        pil_img = Image.open(file_path)
        pil_img = pil_img.crop(
            (w_slice.start, h_slice.start, w_slice.stop, h_slice.stop)
        )
        if resize is not None:
            pil_img = pil_img.resize((w, h))
        face = np.asarray(pil_img, dtype=np.float32)

        if face.ndim == 0:
            raise RuntimeError(
                "Failed to read the image file %s, "
                "Please make sure that libjpeg is installed" % file_path
            )

        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)

        faces[i, ...] = face

    return faces


#
# Task #1: Face Identification on picture with names
#


def _fetch_lfw_people(
    data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0
):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.
    """
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            person_name = person_name.replace("_", " ")
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError(
            "min_faces_per_person=%d is too restrictive" % min_faces_per_person
        )

    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption

    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names


@validate_params(
    {
        "data_home": [str, PathLike, None],
        "funneled": ["boolean"],
        "resize": [Interval(Real, 0, None, closed="neither"), None],
        "min_faces_per_person": [Interval(Integral, 0, None, closed="left"), None],
        "color": ["boolean"],
        "slice_": [tuple, Hidden(None)],
        "download_if_missing": ["boolean"],
        "return_X_y": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_lfw_people(
    *,
    data_home=None,
    funneled=True,
    resize=0.5,
    min_faces_per_person=0,
    color=False,
    slice_=(slice(70, 195), slice(78, 172)),
    download_if_missing=True,
    return_X_y=False,
):
    """Load the Labeled Faces in the Wild (LFW) people dataset \
(classification).

    Download it if necessary.

    ================= =======================
    Classes                              5749
    Samples total                       13233
    Dimensionality                       5828
    Features          real, between 0 and 255
    ================= =======================

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.

    Parameters
    ----------
    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : bool, default=True
        Download and use the funneled variant of the dataset.

    resize : float or None, default=0.5
        Ratio used to resize each face picture. If `None`, no resizing is
        performed.

    min_faces_per_person : int, default=0
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : bool, default=False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid statistical
        correlation from the background.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : bool, default=False
        If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
        object. See below for more information about the `dataset.data` and
        `dataset.target` object.

        .. versionadded:: 0.20

    Returns
    -------
    dataset : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : numpy array of shape (13233, 2914)
            Each row corresponds to a ravelled face image
            of original size 62 x 47 pixels.
            Changing the ``slice_`` or resize parameters will change the
            shape of the output.
        images : numpy array of shape (13233, 62, 47)
            Each row is a face image corresponding to one of the 5749 people in
            the dataset. Changing the ``slice_``
            or resize parameters will change the shape of the output.
        target : numpy array of shape (13233,)
            Labels associated to each face image.
            Those labels range from 0-5748 and correspond to the person IDs.
        target_names : numpy array of shape (5749,)
            Names of all persons in the dataset.
            Position in array corresponds to the person ID in the target array.
        DESCR : str
            Description of the Labeled Faces in the Wild (LFW) dataset.

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarrays. The first containing a 2D array of
        shape (n_samples, n_features) with each row representing one
        sample and each column representing the features. The second
        ndarray of shape (n_samples,) containing the target samples.

        .. versionadded:: 0.20
    """
    lfw_home, data_folder_path = _check_fetch_lfw(
        data_home=data_home, funneled=funneled, download_if_missing=download_if_missing
    )
    logger.debug("Loading LFW people faces from %s", lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(location=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)

    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path,
        resize=resize,
        min_faces_per_person=min_faces_per_person,
        color=color,
        slice_=slice_,
    )

    X = faces.reshape(len(faces), -1)

    fdescr = load_descr("lfw.rst")

    if return_X_y:
        return X, target

    # pack the results as a Bunch instance
    return Bunch(
        data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr
    )
+
|
373 |
+
|
374 |
+
#
|
375 |
+
# Task #2: Face Verification on pairs of face pictures
|
376 |
+
#
|
377 |
+
|
378 |
+
|
379 |
+
def _fetch_lfw_pairs(
|
380 |
+
index_file_path, data_folder_path, slice_=None, color=False, resize=None
|
381 |
+
):
|
382 |
+
"""Perform the actual data loading for the LFW pairs dataset
|
383 |
+
|
384 |
+
This operation is meant to be cached by a joblib wrapper.
|
385 |
+
"""
|
386 |
+
# parse the index file to find the number of pairs to be able to allocate
|
387 |
+
# the right amount of memory before starting to decode the jpeg files
|
388 |
+
with open(index_file_path, "rb") as index_file:
|
389 |
+
split_lines = [ln.decode().strip().split("\t") for ln in index_file]
|
390 |
+
pair_specs = [sl for sl in split_lines if len(sl) > 2]
|
391 |
+
n_pairs = len(pair_specs)
|
392 |
+
|
393 |
+
# iterating over the metadata lines for each pair to find the filename to
|
394 |
+
# decode and load in memory
|
395 |
+
target = np.zeros(n_pairs, dtype=int)
|
396 |
+
file_paths = list()
|
397 |
+
for i, components in enumerate(pair_specs):
|
398 |
+
if len(components) == 3:
|
399 |
+
target[i] = 1
|
400 |
+
pair = (
|
401 |
+
(components[0], int(components[1]) - 1),
|
402 |
+
(components[0], int(components[2]) - 1),
|
403 |
+
)
|
404 |
+
elif len(components) == 4:
|
405 |
+
target[i] = 0
|
406 |
+
pair = (
|
407 |
+
(components[0], int(components[1]) - 1),
|
408 |
+
(components[2], int(components[3]) - 1),
|
409 |
+
)
|
410 |
+
else:
|
411 |
+
raise ValueError("invalid line %d: %r" % (i + 1, components))
|
412 |
+
for j, (name, idx) in enumerate(pair):
|
413 |
+
try:
|
414 |
+
person_folder = join(data_folder_path, name)
|
415 |
+
except TypeError:
|
416 |
+
person_folder = join(data_folder_path, str(name, "UTF-8"))
|
417 |
+
filenames = list(sorted(listdir(person_folder)))
|
418 |
+
file_path = join(person_folder, filenames[idx])
|
419 |
+
file_paths.append(file_path)
|
420 |
+
|
421 |
+
pairs = _load_imgs(file_paths, slice_, color, resize)
|
422 |
+
shape = list(pairs.shape)
|
423 |
+
n_faces = shape.pop(0)
|
424 |
+
shape.insert(0, 2)
|
425 |
+
shape.insert(0, n_faces // 2)
|
426 |
+
pairs.shape = shape
|
427 |
+
|
428 |
+
return pairs, target, np.array(["Different persons", "Same person"])
|
429 |
+
|
430 |
+
|
431 |
+
@validate_params(
|
432 |
+
{
|
433 |
+
"subset": [StrOptions({"train", "test", "10_folds"})],
|
434 |
+
"data_home": [str, PathLike, None],
|
435 |
+
"funneled": ["boolean"],
|
436 |
+
"resize": [Interval(Real, 0, None, closed="neither"), None],
|
437 |
+
"color": ["boolean"],
|
438 |
+
"slice_": [tuple, Hidden(None)],
|
439 |
+
"download_if_missing": ["boolean"],
|
440 |
+
},
|
441 |
+
prefer_skip_nested_validation=True,
|
442 |
+
)
|
443 |
+
def fetch_lfw_pairs(
|
444 |
+
*,
|
445 |
+
subset="train",
|
446 |
+
data_home=None,
|
447 |
+
funneled=True,
|
448 |
+
resize=0.5,
|
449 |
+
color=False,
|
450 |
+
slice_=(slice(70, 195), slice(78, 172)),
|
451 |
+
download_if_missing=True,
|
452 |
+
):
|
453 |
+
"""Load the Labeled Faces in the Wild (LFW) pairs dataset (classification).
|
454 |
+
|
455 |
+
Download it if necessary.
|
456 |
+
|
457 |
+
================= =======================
|
458 |
+
Classes 2
|
459 |
+
Samples total 13233
|
460 |
+
Dimensionality 5828
|
461 |
+
Features real, between 0 and 255
|
462 |
+
================= =======================
|
463 |
+
|
464 |
+
In the official `README.txt`_ this task is described as the
|
465 |
+
"Restricted" task. As I am not sure as to implement the
|
466 |
+
"Unrestricted" variant correctly, I left it as unsupported for now.
|
467 |
+
|
468 |
+
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
|
469 |
+
|
470 |
+
The original images are 250 x 250 pixels, but the default slice and resize
|
471 |
+
arguments reduce them to 62 x 47.
|
472 |
+
|
473 |
+
Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.
|
474 |
+
|
475 |
+
Parameters
|
476 |
+
----------
|
477 |
+
subset : {'train', 'test', '10_folds'}, default='train'
|
478 |
+
Select the dataset to load: 'train' for the development training
|
479 |
+
set, 'test' for the development test set, and '10_folds' for the
|
480 |
+
official evaluation set that is meant to be used with a 10-folds
|
481 |
+
cross validation.
|
482 |
+
|
483 |
+
data_home : str or path-like, default=None
|
484 |
+
Specify another download and cache folder for the datasets. By
|
485 |
+
default all scikit-learn data is stored in '~/scikit_learn_data'
|
486 |
+
subfolders.
|
487 |
+
|
488 |
+
funneled : bool, default=True
|
489 |
+
Download and use the funneled variant of the dataset.
|
490 |
+
|
491 |
+
resize : float, default=0.5
|
492 |
+
Ratio used to resize the each face picture.
|
493 |
+
|
494 |
+
color : bool, default=False
|
495 |
+
Keep the 3 RGB channels instead of averaging them to a single
|
496 |
+
gray level channel. If color is True the shape of the data has
|
497 |
+
one more dimension than the shape with color = False.
|
498 |
+
|
499 |
+
slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172))
|
500 |
+
Provide a custom 2D slice (height, width) to extract the
|
501 |
+
'interesting' part of the jpeg files and avoid use statistical
|
502 |
+
correlation from the background.
|
503 |
+
|
504 |
+
download_if_missing : bool, default=True
|
505 |
+
If False, raise an OSError if the data is not locally available
|
506 |
+
instead of trying to download the data from the source site.
|
507 |
+
|
508 |
+
Returns
|
509 |
+
-------
|
510 |
+
data : :class:`~sklearn.utils.Bunch`
|
511 |
+
Dictionary-like object, with the following attributes.
|
512 |
+
|
513 |
+
data : ndarray of shape (2200, 5828). Shape depends on ``subset``.
|
514 |
+
Each row corresponds to 2 ravel'd face images
|
515 |
+
of original size 62 x 47 pixels.
|
516 |
+
Changing the ``slice_``, ``resize`` or ``subset`` parameters
|
517 |
+
will change the shape of the output.
|
518 |
+
pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset``
|
519 |
+
Each row has 2 face images corresponding
|
520 |
+
to same or different person from the dataset
|
521 |
+
containing 5749 people. Changing the ``slice_``,
|
522 |
+
``resize`` or ``subset`` parameters will change the shape of the
|
523 |
+
output.
|
524 |
+
target : numpy array of shape (2200,). Shape depends on ``subset``.
|
525 |
+
Labels associated to each pair of images.
|
526 |
+
The two label values being different persons or the same person.
|
527 |
+
target_names : numpy array of shape (2,)
|
528 |
+
Explains the target values of the target array.
|
529 |
+
0 corresponds to "Different person", 1 corresponds to "same person".
|
530 |
+
DESCR : str
|
531 |
+
Description of the Labeled Faces in the Wild (LFW) dataset.
|
532 |
+
"""
|
533 |
+
lfw_home, data_folder_path = _check_fetch_lfw(
|
534 |
+
data_home=data_home, funneled=funneled, download_if_missing=download_if_missing
|
535 |
+
)
|
536 |
+
logger.debug("Loading %s LFW pairs from %s", subset, lfw_home)
|
537 |
+
|
538 |
+
# wrap the loader in a memoizing function that will return memmaped data
|
539 |
+
# arrays for optimal memory usage
|
540 |
+
m = Memory(location=lfw_home, compress=6, verbose=0)
|
541 |
+
load_func = m.cache(_fetch_lfw_pairs)
|
542 |
+
|
543 |
+
# select the right metadata file according to the requested subset
|
544 |
+
label_filenames = {
|
545 |
+
"train": "pairsDevTrain.txt",
|
546 |
+
"test": "pairsDevTest.txt",
|
547 |
+
"10_folds": "pairs.txt",
|
548 |
+
}
|
549 |
+
if subset not in label_filenames:
|
550 |
+
raise ValueError(
|
551 |
+
"subset='%s' is invalid: should be one of %r"
|
552 |
+
% (subset, list(sorted(label_filenames.keys())))
|
553 |
+
)
|
554 |
+
index_file_path = join(lfw_home, label_filenames[subset])
|
555 |
+
|
556 |
+
# load and memoize the pairs as np arrays
|
557 |
+
pairs, target, target_names = load_func(
|
558 |
+
index_file_path, data_folder_path, resize=resize, color=color, slice_=slice_
|
559 |
+
)
|
560 |
+
|
561 |
+
fdescr = load_descr("lfw.rst")
|
562 |
+
|
563 |
+
# pack the results as a Bunch instance
|
564 |
+
return Bunch(
|
565 |
+
data=pairs.reshape(len(pairs), -1),
|
566 |
+
pairs=pairs,
|
567 |
+
target=target,
|
568 |
+
target_names=target_names,
|
569 |
+
DESCR=fdescr,
|
570 |
+
)
|
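
And the pairs counterpart; with the default arguments the development training split contains 2200 pairs of 62 x 47 images:

    from sklearn.datasets import fetch_lfw_pairs

    pairs_train = fetch_lfw_pairs(subset="train")
    print(pairs_train.pairs.shape)   # (2200, 2, 62, 47)
    print(pairs_train.target_names)  # ['Different persons' 'Same person']
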
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py
ADDED
@@ -0,0 +1,156 @@
"""Modified Olivetti faces dataset.

The original database was available from (now defunct)

https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html

The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:

https://cs.nyu.edu/~roweis/
"""

# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause

from os import PathLike, makedirs, remove
from os.path import exists

import joblib
import numpy as np
from scipy.io import loadmat

from ..utils import Bunch, check_random_state
from ..utils._param_validation import validate_params
from . import get_data_home
from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr

# The original data can be found at:
# https://cs.nyu.edu/~roweis/data/olivettifaces.mat
FACES = RemoteFileMetadata(
    filename="olivettifaces.mat",
    url="https://ndownloader.figshare.com/files/5976027",
    checksum="b612fb967f2dc77c9c62d3e1266e0c73d5fca46a4b8906c18e454d41af987794",
)


@validate_params(
    {
        "data_home": [str, PathLike, None],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
        "download_if_missing": ["boolean"],
        "return_X_y": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_olivetti_faces(
    *,
    data_home=None,
    shuffle=False,
    random_state=0,
    download_if_missing=True,
    return_X_y=False,
):
    """Load the Olivetti faces data-set from AT&T (classification).

    Download it if necessary.

    ================= =====================
    Classes                             40
    Samples total                      400
    Dimensionality                    4096
    Features         real, between 0 and 1
    ================= =====================

    Read more in the :ref:`User Guide <olivetti_faces_dataset>`.

    Parameters
    ----------
    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    shuffle : bool, default=False
        If True the order of the dataset is shuffled to avoid having
        images of the same person grouped.

    random_state : int, RandomState instance or None, default=0
        Determines random number generation for dataset shuffling. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : bool, default=False
        If True, returns `(data, target)` instead of a `Bunch` object. See
        below for more information about the `data` and `target` object.

        .. versionadded:: 0.22

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : ndarray, shape (400, 4096)
            Each row corresponds to a ravelled
            face image of original size 64 x 64 pixels.
        images : ndarray, shape (400, 64, 64)
            Each row is a face image
            corresponding to one of the 40 subjects of the dataset.
        target : ndarray, shape (400,)
            Labels associated to each face image.
            Those labels range from 0 to 39 and correspond to the
            Subject IDs.
        DESCR : str
            Description of the modified Olivetti Faces Dataset.

    (data, target) : tuple if `return_X_y=True`
        Tuple with the `data` and `target` objects described above.

        .. versionadded:: 0.22
    """
    data_home = get_data_home(data_home=data_home)
    if not exists(data_home):
        makedirs(data_home)
    filepath = _pkl_filepath(data_home, "olivetti.pkz")
    if not exists(filepath):
        if not download_if_missing:
            raise OSError("Data not found and `download_if_missing` is False")

        print("downloading Olivetti faces from %s to %s" % (FACES.url, data_home))
        mat_path = _fetch_remote(FACES, dirname=data_home)
        mfile = loadmat(file_name=mat_path)
        # delete raw .mat data
        remove(mat_path)

        faces = mfile["faces"].T.copy()
        joblib.dump(faces, filepath, compress=6)
        del mfile
    else:
        faces = joblib.load(filepath)

    # We want floating point data, but float32 is enough (there is only
    # one byte of precision in the original uint8s anyway)
    faces = np.float32(faces)
    faces = faces - faces.min()
    faces /= faces.max()
    faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
    # 10 images per class, 400 images total, each class is contiguous.
    target = np.array([i // 10 for i in range(400)])
    if shuffle:
        random_state = check_random_state(random_state)
        order = random_state.permutation(len(faces))
        faces = faces[order]
        target = target[order]
    faces_vectorized = faces.reshape(len(faces), -1)

    fdescr = load_descr("olivetti_faces.rst")

    if return_X_y:
        return faces_vectorized, target

    return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr)
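
A usage sketch for fetch_olivetti_faces; the shapes follow directly from the code above (400 images of 40 subjects at 64 x 64 pixels, 10 contiguous images per subject unless shuffled):

    from sklearn.datasets import fetch_olivetti_faces

    faces = fetch_olivetti_faces(shuffle=True, random_state=42)
    print(faces.data.shape)    # (400, 4096)
    print(faces.images.shape)  # (400, 64, 64)
    print(faces.target[:5])    # subject IDs in the range 0-39
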
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_openml.py
ADDED
@@ -0,0 +1,1158 @@
import gzip
import hashlib
import json
import os
import shutil
import time
from contextlib import closing
from functools import wraps
from os.path import join
from tempfile import TemporaryDirectory
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
from warnings import warn

import numpy as np

from ..utils import (
    Bunch,
    check_pandas_support,  # noqa
)
from ..utils._param_validation import (
    Integral,
    Interval,
    Real,
    StrOptions,
    validate_params,
)
from . import get_data_home
from ._arff_parser import load_arff_from_gzip_file

__all__ = ["fetch_openml"]

_OPENML_PREFIX = "https://api.openml.org/"
_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2"
_DATA_INFO = "api/v1/json/data/{}"
_DATA_FEATURES = "api/v1/json/data/features/{}"
_DATA_QUALITIES = "api/v1/json/data/qualities/{}"
_DATA_FILE = "data/v1/download/{}"

OpenmlQualitiesType = List[Dict[str, str]]
OpenmlFeaturesType = List[Dict[str, str]]


def _get_local_path(openml_path: str, data_home: str) -> str:
    return os.path.join(data_home, "openml.org", openml_path + ".gz")


def _retry_with_clean_cache(
    openml_path: str,
    data_home: Optional[str],
    no_retry_exception: Optional[Exception] = None,
) -> Callable:
    """If the first call to the decorated function fails, the local cached
    file is removed, and the function is called again. If ``data_home`` is
    ``None``, then the function is called once. We can provide a specific
    exception to not retry on using the `no_retry_exception` parameter.
    """

    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kw):
            if data_home is None:
                return f(*args, **kw)
            try:
                return f(*args, **kw)
            except URLError:
                raise
            except Exception as exc:
                if no_retry_exception is not None and isinstance(
                    exc, no_retry_exception
                ):
                    raise
                warn("Invalid cache, redownloading file", RuntimeWarning)
                local_path = _get_local_path(openml_path, data_home)
                if os.path.exists(local_path):
                    os.unlink(local_path)
                return f(*args, **kw)

        return wrapper

    return decorator
+
|
84 |
+
|
def _retry_on_network_error(
    n_retries: int = 3, delay: float = 1.0, url: str = ""
) -> Callable:
    """If the function call results in a network error, call the function again
    up to ``n_retries`` times with a ``delay`` between each call. If the error
    has a 412 status code, don't call the function again as this is a specific
    OpenML error.
    The url parameter is used to give more information to the user about the
    error.
    """

    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            retry_counter = n_retries
            while True:
                try:
                    return f(*args, **kwargs)
                except (URLError, TimeoutError) as e:
                    # 412 is a specific OpenML error code.
                    if isinstance(e, HTTPError) and e.code == 412:
                        raise
                    if retry_counter == 0:
                        raise
                    warn(
                        f"A network error occurred while downloading {url}. Retrying..."
                    )
                    retry_counter -= 1
                    time.sleep(delay)

        return wrapper

    return decorator

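# Illustrative sketch (assumption, not library API): since the decorator
# returns a callable transformer, it composes directly with `urlopen`, which
# is exactly how `_open_openml_url` below uses it:
#
#   guarded_urlopen = _retry_on_network_error(n_retries=2, delay=0.5, url=url)(urlopen)
#   response = guarded_urlopen(Request(url))  # retried twice on URLError/timeout
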
def _open_openml_url(
    openml_path: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0
):
    """
    Returns a resource from OpenML.org. Caches it to data_home if required.

    Parameters
    ----------
    openml_path : str
        OpenML URL that will be accessed. This will be prefixed with
        _OPENML_PREFIX.

    data_home : str
        Directory to which the files will be cached. If None, no caching will
        be applied.

    n_retries : int, default=3
        Number of retries when HTTP errors are encountered. Errors with status
        code 412 won't be retried as they represent OpenML generic errors.

    delay : float, default=1.0
        Number of seconds between retries.

    Returns
    -------
    result : stream
        A stream to the OpenML resource.
    """

    def is_gzip_encoded(_fsrc):
        return _fsrc.info().get("Content-Encoding", "") == "gzip"

    req = Request(_OPENML_PREFIX + openml_path)
    req.add_header("Accept-encoding", "gzip")

    if data_home is None:
        fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)
        if is_gzip_encoded(fsrc):
            return gzip.GzipFile(fileobj=fsrc, mode="rb")
        return fsrc

    local_path = _get_local_path(openml_path, data_home)
    dir_name, file_name = os.path.split(local_path)
    if not os.path.exists(local_path):
        os.makedirs(dir_name, exist_ok=True)
        try:
            # Create a tmpdir as a subfolder of dir_name where the final file will
            # be moved to if the download is successful. This guarantees that the
            # renaming operation to the final location is atomic to ensure the
            # concurrency safety of the dataset caching mechanism.
            with TemporaryDirectory(dir=dir_name) as tmpdir:
                with closing(
                    _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(
                        req
                    )
                ) as fsrc:
                    opener: Callable
                    if is_gzip_encoded(fsrc):
                        opener = open
                    else:
                        opener = gzip.GzipFile
                    with opener(os.path.join(tmpdir, file_name), "wb") as fdst:
                        shutil.copyfileobj(fsrc, fdst)
                shutil.move(fdst.name, local_path)
        except Exception:
            if os.path.exists(local_path):
                os.unlink(local_path)
            raise

    # XXX: First time, decompression will not be necessary (by using fsrc), but
    # it will happen nonetheless
    return gzip.GzipFile(local_path, "rb")

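# Illustrative sketch (assumption): every download in this module funnels
# through `_open_openml_url`; with a `data_home` the returned stream is backed
# by the on-disk gzip cache, without one it wraps the live HTTP response.
#
#   with closing(_open_openml_url("api/v1/json/data/61", data_home=None)) as f:
#       payload = json.loads(f.read().decode("utf-8"))
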
class OpenMLError(ValueError):
    """HTTP 412 is a specific OpenML error code, indicating a generic error"""

    pass

def _get_json_content_from_openml_api(
    url: str,
    error_message: Optional[str],
    data_home: Optional[str],
    n_retries: int = 3,
    delay: float = 1.0,
) -> Dict:
    """
    Loads json data from the openml api.

    Parameters
    ----------
    url : str
        The URL to load from. Should be an official OpenML endpoint.

    error_message : str or None
        The error message to raise if an acceptable OpenML error is thrown
        (an acceptable error is, e.g., a data id not found; other errors,
        like a 404, will raise the native error message).

    data_home : str or None
        Location to cache the response. None if no cache is required.

    n_retries : int, default=3
        Number of retries when HTTP errors are encountered. Errors with status
        code 412 won't be retried as they represent OpenML generic errors.

    delay : float, default=1.0
        Number of seconds between retries.

    Returns
    -------
    json_data : json
        The json result from the OpenML server if the call was successful.
        An exception otherwise.
    """

    @_retry_with_clean_cache(url, data_home=data_home)
    def _load_json():
        with closing(
            _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
        ) as response:
            return json.loads(response.read().decode("utf-8"))

    try:
        return _load_json()
    except HTTPError as error:
        # 412 is an OpenML specific error code, indicating a generic error
        # (e.g., data not found)
        if error.code != 412:
            raise error

    # 412 error, not in except for nicer traceback
    raise OpenMLError(error_message)

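# Illustrative note (sketch, not executed): OpenML signals "not found" with
# HTTP 412, which this helper converts into the library's OpenMLError so
# callers can catch a single exception type, e.g.:
#
#   try:
#       _get_json_content_from_openml_api(url, "dataset not found", data_home=None)
#   except OpenMLError:
#       ...  # fall back, e.g. retry with a different status filter
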
def _get_data_info_by_name(
    name: str,
    version: Union[int, str],
    data_home: Optional[str],
    n_retries: int = 3,
    delay: float = 1.0,
):
    """
    Utilizes the openml dataset listing api to find a dataset by
    name/version
    OpenML api function:
    https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name

    Parameters
    ----------
    name : str
        name of the dataset

    version : int or str
        If version is an integer, the exact name/version will be obtained from
        OpenML. If version is a string (value: "active") it will take the first
        version from OpenML that is annotated as active. Any other string
        values except "active" are treated as integer.

    data_home : str or None
        Location to cache the response. None if no cache is required.

    n_retries : int, default=3
        Number of retries when HTTP errors are encountered. Errors with status
        code 412 won't be retried as they represent OpenML generic errors.

    delay : float, default=1.0
        Number of seconds between retries.

    Returns
    -------
    first_dataset : json
        json representation of the first dataset object that adhered to the
        search criteria

    """
    if version == "active":
        # situation in which we return the oldest active version
        url = _SEARCH_NAME.format(name) + "/status/active/"
        error_msg = "No active dataset {} found.".format(name)
        json_data = _get_json_content_from_openml_api(
            url,
            error_msg,
            data_home=data_home,
            n_retries=n_retries,
            delay=delay,
        )
        res = json_data["data"]["dataset"]
        if len(res) > 1:
            first_version = version = res[0]["version"]
            warning_msg = (
                "Multiple active versions of the dataset matching the name"
                f" {name} exist. Versions may be fundamentally different, "
                f"returning version {first_version}. "
                "Available versions:\n"
            )
            for r in res:
                warning_msg += f"- version {r['version']}, status: {r['status']}\n"
                warning_msg += (
                    f"  url: https://www.openml.org/search?type=data&id={r['did']}\n"
                )
            warn(warning_msg)
        return res[0]

    # an integer version has been provided
    url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
    try:
        json_data = _get_json_content_from_openml_api(
            url,
            error_message=None,
            data_home=data_home,
            n_retries=n_retries,
            delay=delay,
        )
    except OpenMLError:
        # we can do this in 1 function call if OpenML does not require the
        # specification of the dataset status (i.e., return datasets with a
        # given name / version regardless of active, deactivated, etc. )
        # TODO: feature request OpenML.
        url += "/status/deactivated"
        error_msg = "Dataset {} with version {} not found.".format(name, version)
        json_data = _get_json_content_from_openml_api(
            url,
            error_msg,
            data_home=data_home,
            n_retries=n_retries,
            delay=delay,
        )

    return json_data["data"]["dataset"][0]

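# Illustrative sketch (assumption): resolving a name to a dataset id before
# fetching, mirroring what `fetch_openml(name=...)` does internally:
#
#   info = _get_data_info_by_name("iris", version="active", data_home=None)
#   data_id = info["did"]  # e.g. 61 for the canonical iris upload
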
def _get_data_description_by_id(
    data_id: int,
    data_home: Optional[str],
    n_retries: int = 3,
    delay: float = 1.0,
) -> Dict[str, Any]:
    # OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id
    url = _DATA_INFO.format(data_id)
    error_message = "Dataset with data_id {} not found.".format(data_id)
    json_data = _get_json_content_from_openml_api(
        url,
        error_message,
        data_home=data_home,
        n_retries=n_retries,
        delay=delay,
    )
    return json_data["data_set_description"]

def _get_data_features(
    data_id: int,
    data_home: Optional[str],
    n_retries: int = 3,
    delay: float = 1.0,
) -> OpenmlFeaturesType:
    # OpenML function:
    # https://www.openml.org/api_docs#!/data/get_data_features_id
    url = _DATA_FEATURES.format(data_id)
    error_message = "Dataset with data_id {} not found.".format(data_id)
    json_data = _get_json_content_from_openml_api(
        url,
        error_message,
        data_home=data_home,
        n_retries=n_retries,
        delay=delay,
    )
    return json_data["data_features"]["feature"]

def _get_data_qualities(
    data_id: int,
    data_home: Optional[str],
    n_retries: int = 3,
    delay: float = 1.0,
) -> OpenmlQualitiesType:
    # OpenML API function:
    # https://www.openml.org/api_docs#!/data/get_data_qualities_id
    url = _DATA_QUALITIES.format(data_id)
    error_message = "Dataset with data_id {} not found.".format(data_id)
    json_data = _get_json_content_from_openml_api(
        url,
        error_message,
        data_home=data_home,
        n_retries=n_retries,
        delay=delay,
    )
    # the qualities might not be available, but we still try to process
    # the data
    return json_data.get("data_qualities", {}).get("quality", [])

def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
    """Get the number of samples from data qualities.

    Parameters
    ----------
    data_qualities : list of dict
        Used to retrieve the number of instances (samples) in the dataset.

    Returns
    -------
    n_samples : int
        The number of samples in the dataset or -1 if data qualities are
        unavailable.
    """
    # If the data qualities are unavailable, we return -1
    default_n_samples = -1

    qualities = {d["name"]: d["value"] for d in data_qualities}
    return int(float(qualities.get("NumberOfInstances", default_n_samples)))

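# Illustrative sketch (toy input, not real server output): the qualities are
# name/value string pairs, hence the float-then-int round trip above.
#
#   _get_num_samples([{"name": "NumberOfInstances", "value": "150.0"}])  # -> 150
#   _get_num_samples([])                                                 # -> -1
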
def _load_arff_response(
    url: str,
    data_home: Optional[str],
    parser: str,
    output_type: str,
    openml_columns_info: dict,
    feature_names_to_select: List[str],
    target_names_to_select: List[str],
    shape: Optional[Tuple[int, int]],
    md5_checksum: str,
    n_retries: int = 3,
    delay: float = 1.0,
    read_csv_kwargs: Optional[Dict] = None,
):
    """Load the ARFF data associated with the OpenML URL.

    In addition to loading the data, this function will also check the
    integrity of the downloaded file from OpenML using MD5 checksum.

    Parameters
    ----------
    url : str
        The URL of the ARFF file on OpenML.

    data_home : str
        The location where to cache the data.

    parser : {"liac-arff", "pandas"}
        The parser used to parse the ARFF file.

    output_type : {"numpy", "pandas", "sparse"}
        The type of the arrays that will be returned. The possibilities are:

        - `"numpy"`: both `X` and `y` will be NumPy arrays;
        - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
        - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
          pandas Series or DataFrame.

    openml_columns_info : dict
        The information provided by OpenML regarding the columns of the ARFF
        file.

    feature_names_to_select : list of str
        The list of the features to be selected.

    target_names_to_select : list of str
        The list of the target variables to be selected.

    shape : tuple or None
        With `parser="liac-arff"`, when using a generator to load the data,
        one needs to provide the shape of the data beforehand.

    md5_checksum : str
        The MD5 checksum provided by OpenML to check the data integrity.

    n_retries : int, default=3
        The number of times to retry downloading the data if it fails.

    delay : float, default=1.0
        The delay between two consecutive downloads in seconds.

    read_csv_kwargs : dict, default=None
        Keyword arguments to pass to `pandas.read_csv` when using the pandas
        parser. It allows overwriting the default options.

        .. versionadded:: 1.3

    Returns
    -------
    X : {ndarray, sparse matrix, dataframe}
        The data matrix.

    y : {ndarray, dataframe, series}
        The target.

    frame : dataframe or None
        A dataframe containing both `X` and `y`. `None` if
        `output_array_type != "pandas"`.

    categories : list of str or None
        The names of the features that are categorical. `None` if
        `output_array_type == "pandas"`.
    """
    gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
    with closing(gzip_file):
        md5 = hashlib.md5()
        for chunk in iter(lambda: gzip_file.read(4096), b""):
            md5.update(chunk)
        actual_md5_checksum = md5.hexdigest()

    if actual_md5_checksum != md5_checksum:
        raise ValueError(
            f"md5 checksum of local file for {url} does not match description: "
            f"expected: {md5_checksum} but got {actual_md5_checksum}. "
            "Downloaded file could have been modified / corrupted, clean cache "
            "and retry..."
        )

    def _open_url_and_load_gzip_file(url, data_home, n_retries, delay, arff_params):
        gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
        with closing(gzip_file):
            return load_arff_from_gzip_file(gzip_file, **arff_params)

    arff_params: Dict = dict(
        parser=parser,
        output_type=output_type,
        openml_columns_info=openml_columns_info,
        feature_names_to_select=feature_names_to_select,
        target_names_to_select=target_names_to_select,
        shape=shape,
        read_csv_kwargs=read_csv_kwargs or {},
    )
    try:
        X, y, frame, categories = _open_url_and_load_gzip_file(
            url, data_home, n_retries, delay, arff_params
        )
    except Exception as exc:
        if parser != "pandas":
            raise

        from pandas.errors import ParserError

        if not isinstance(exc, ParserError):
            raise

        # A parsing error could come from providing the wrong quotechar
        # to pandas. By default, we use a double quote. Thus, we retry
        # with a single quote before raising the error.
        arff_params["read_csv_kwargs"].update(quotechar="'")
        X, y, frame, categories = _open_url_and_load_gzip_file(
            url, data_home, n_retries, delay, arff_params
        )

    return X, y, frame, categories

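# Illustrative sketch (standalone, hashlib only): the incremental-MD5 pattern
# used above, applied to any binary stream; `expected_checksum` and the file
# name are placeholders supplied by the caller.
#
#   md5 = hashlib.md5()
#   with open("payload.gz", "rb") as f:
#       for chunk in iter(lambda: f.read(4096), b""):
#           md5.update(chunk)
#   assert md5.hexdigest() == expected_checksum
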
def _download_data_to_bunch(
    url: str,
    sparse: bool,
    data_home: Optional[str],
    *,
    as_frame: bool,
    openml_columns_info: List[dict],
    data_columns: List[str],
    target_columns: List[str],
    shape: Optional[Tuple[int, int]],
    md5_checksum: str,
    n_retries: int = 3,
    delay: float = 1.0,
    parser: str,
    read_csv_kwargs: Optional[Dict] = None,
):
    """Download ARFF data, load it into a specific container and create a Bunch.

    This function has a mechanism to retry/cache/clean the data.

    Parameters
    ----------
    url : str
        The URL of the ARFF file on OpenML.

    sparse : bool
        Whether the dataset is expected to use the sparse ARFF format.

    data_home : str
        The location where to cache the data.

    as_frame : bool
        Whether or not to return the data into a pandas DataFrame.

    openml_columns_info : list of dict
        The information regarding the columns provided by OpenML for the
        ARFF dataset. The information is stored as a list of dictionaries.

    data_columns : list of str
        The list of the features to be selected.

    target_columns : list of str
        The list of the target variables to be selected.

    shape : tuple or None
        With `parser="liac-arff"`, when using a generator to load the data,
        one needs to provide the shape of the data beforehand.

    md5_checksum : str
        The MD5 checksum provided by OpenML to check the data integrity.

    n_retries : int, default=3
        Number of retries when HTTP errors are encountered. Errors with status
        code 412 won't be retried as they represent OpenML generic errors.

    delay : float, default=1.0
        Number of seconds between retries.

    parser : {"liac-arff", "pandas"}
        The parser used to parse the ARFF file.

    read_csv_kwargs : dict, default=None
        Keyword arguments to pass to `pandas.read_csv` when using the pandas
        parser. It allows overwriting the default options.

        .. versionadded:: 1.3

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        X : {ndarray, sparse matrix, dataframe}
            The data matrix.
        y : {ndarray, dataframe, series}
            The target.
        frame : dataframe or None
            A dataframe containing both `X` and `y`. `None` if
            `output_array_type != "pandas"`.
        categories : list of str or None
            The names of the features that are categorical. `None` if
            `output_array_type == "pandas"`.
    """
    # Prepare which columns and data types should be returned for the X and y
    features_dict = {feature["name"]: feature for feature in openml_columns_info}

    if sparse:
        output_type = "sparse"
    elif as_frame:
        output_type = "pandas"
    else:
        output_type = "numpy"

    # XXX: target columns should all be categorical or all numeric
    _verify_target_data_type(features_dict, target_columns)
    for name in target_columns:
        column_info = features_dict[name]
        n_missing_values = int(column_info["number_of_missing_values"])
        if n_missing_values > 0:
            raise ValueError(
                f"Target column '{column_info['name']}' has {n_missing_values} missing "
                "values. Missing values are not supported for target columns."
            )

    no_retry_exception = None
    if parser == "pandas":
        # If we get a ParserError with pandas, then we don't want to retry and we
        # raise early.
        from pandas.errors import ParserError

        no_retry_exception = ParserError

    X, y, frame, categories = _retry_with_clean_cache(
        url, data_home, no_retry_exception
    )(_load_arff_response)(
        url,
        data_home,
        parser=parser,
        output_type=output_type,
        openml_columns_info=features_dict,
        feature_names_to_select=data_columns,
        target_names_to_select=target_columns,
        shape=shape,
        md5_checksum=md5_checksum,
        n_retries=n_retries,
        delay=delay,
        read_csv_kwargs=read_csv_kwargs,
    )

    return Bunch(
        data=X,
        target=y,
        frame=frame,
        categories=categories,
        feature_names=data_columns,
        target_names=target_columns,
    )

def _verify_target_data_type(features_dict, target_columns):
    # verifies the data type of the y array in case there are multiple targets
    # (throws an error if these targets do not comply with sklearn support)
    if not isinstance(target_columns, list):
        raise ValueError("target_column should be list, got: %s" % type(target_columns))
    found_types = set()
    for target_column in target_columns:
        if target_column not in features_dict:
            raise KeyError(f"Could not find target_column='{target_column}'")
        if features_dict[target_column]["data_type"] == "numeric":
            found_types.add(np.float64)
        else:
            found_types.add(object)

        # note: we compare to a string, not boolean
        if features_dict[target_column]["is_ignore"] == "true":
            warn(f"target_column='{target_column}' has flag is_ignore.")
        if features_dict[target_column]["is_row_identifier"] == "true":
            warn(f"target_column='{target_column}' has flag is_row_identifier.")
    if len(found_types) > 1:
        raise ValueError(
            "Can only handle homogeneous multi-target datasets, "
            "i.e., all targets are either numeric or "
            "categorical."
        )

def _valid_data_column_names(features_list, target_columns):
    # logic for determining which columns can be learned on. Note that it
    # follows from the OpenML guide that columns carrying the
    # `is_row_identifier` or `is_ignore` flag cannot be learned on. Target
    # columns are also excluded.
    valid_data_column_names = []
    for feature in features_list:
        if (
            feature["name"] not in target_columns
            and feature["is_ignore"] != "true"
            and feature["is_row_identifier"] != "true"
        ):
            valid_data_column_names.append(feature["name"])
    return valid_data_column_names

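# Illustrative sketch (toy metadata in the shape returned by the features
# endpoint): row identifiers, ignored columns and targets are filtered out.
#
#   features = [
#       {"name": "id", "is_ignore": "false", "is_row_identifier": "true"},
#       {"name": "sepal_length", "is_ignore": "false", "is_row_identifier": "false"},
#       {"name": "class", "is_ignore": "false", "is_row_identifier": "false"},
#   ]
#   _valid_data_column_names(features, ["class"])  # -> ["sepal_length"]
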
@validate_params(
    {
        "name": [str, None],
        "version": [Interval(Integral, 1, None, closed="left"), StrOptions({"active"})],
        "data_id": [Interval(Integral, 1, None, closed="left"), None],
        "data_home": [str, os.PathLike, None],
        "target_column": [str, list, None],
        "cache": [bool],
        "return_X_y": [bool],
        "as_frame": [bool, StrOptions({"auto"})],
        "n_retries": [Interval(Integral, 1, None, closed="left")],
        "delay": [Interval(Real, 0, None, closed="right")],
        "parser": [
            StrOptions({"auto", "pandas", "liac-arff"}),
        ],
        "read_csv_kwargs": [dict, None],
    },
    prefer_skip_nested_validation=True,
)
def fetch_openml(
    name: Optional[str] = None,
    *,
    version: Union[str, int] = "active",
    data_id: Optional[int] = None,
    data_home: Optional[Union[str, os.PathLike]] = None,
    target_column: Optional[Union[str, List]] = "default-target",
    cache: bool = True,
    return_X_y: bool = False,
    as_frame: Union[str, bool] = "auto",
    n_retries: int = 3,
    delay: float = 1.0,
    parser: str = "auto",
    read_csv_kwargs: Optional[Dict] = None,
):
    """Fetch dataset from openml by name or dataset id.

    Datasets are uniquely identified by either an integer ID or by a
    combination of name and version (i.e. there might be multiple
    versions of the 'iris' dataset). Please give either name or data_id
    (not both). In case a name is given, a version can also be
    provided.

    Read more in the :ref:`User Guide <openml>`.

    .. versionadded:: 0.20

    .. note:: EXPERIMENTAL

        The API is experimental (particularly the return value structure),
        and might have small backward-incompatible changes without notice
        or warning in future releases.

    Parameters
    ----------
    name : str, default=None
        String identifier of the dataset. Note that OpenML can have multiple
        datasets with the same name.

    version : int or 'active', default='active'
        Version of the dataset. Can only be provided if also ``name`` is given.
        If 'active' the oldest version that's still active is used. Since
        there may be more than one active version of a dataset, and those
        versions may fundamentally be different from one another, setting an
        exact version is highly recommended.

    data_id : int, default=None
        OpenML ID of the dataset. The most specific way of retrieving a
        dataset. If data_id is not given, name (and potential version) are
        used to obtain a dataset.

    data_home : str or path-like, default=None
        Specify another download and cache folder for the data sets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    target_column : str, list or None, default='default-target'
        Specify the column name in the data to use as target. If
        'default-target', the standard target column as stored on the server
        is used. If ``None``, all columns are returned as data and the
        target is ``None``. If list (of strings), all columns with these names
        are returned as multi-target (Note: not all scikit-learn classifiers
        can handle all types of multi-output combinations).

    cache : bool, default=True
        Whether to cache the downloaded datasets into `data_home`.

    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object. See
        below for more information about the `data` and `target` objects.

    as_frame : bool or 'auto', default='auto'
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric, string or categorical). The target is
        a pandas DataFrame or Series depending on the number of target_columns.
        The Bunch will contain a ``frame`` attribute with the target and the
        data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
        DataFrames or Series as described above.

        If `as_frame` is 'auto', the data and target will be converted to
        DataFrame or Series as if `as_frame` is set to True, unless the dataset
        is stored in sparse format.

        If `as_frame` is False, the data and target will be NumPy arrays and
        the `data` will only contain numerical values when `parser="liac-arff"`
        where the categories are provided in the attribute `categories` of the
        `Bunch` instance. When `parser="pandas"`, no ordinal encoding is made.

        .. versionchanged:: 0.24
           The default value of `as_frame` changed from `False` to `'auto'`
           in 0.24.

    n_retries : int, default=3
        Number of retries when HTTP errors or network timeouts are encountered.
        Errors with status code 412 won't be retried as they represent OpenML
        generic errors.

    delay : float, default=1.0
        Number of seconds between retries.

    parser : {"auto", "pandas", "liac-arff"}, default="auto"
        Parser used to load the ARFF file. Two parsers are implemented:

        - `"pandas"`: this is the most efficient parser. However, it requires
          pandas to be installed and can only open dense datasets.
        - `"liac-arff"`: this is a pure Python ARFF parser that is much less
          memory- and CPU-efficient. It deals with sparse ARFF datasets.

        If `"auto"`, the parser is chosen automatically such that `"liac-arff"`
        is selected for sparse ARFF datasets, otherwise `"pandas"` is selected.

        .. versionadded:: 1.2
        .. versionchanged:: 1.4
           The default value of `parser` changes from `"liac-arff"` to
           `"auto"`.

    read_csv_kwargs : dict, default=None
        Keyword arguments passed to :func:`pandas.read_csv` when loading the data
        from an ARFF file and using the pandas parser. It allows overwriting some
        default parameters.

        .. versionadded:: 1.3

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
            The feature matrix. Categorical features are encoded as ordinals.
        target : np.array, pandas Series or DataFrame
            The regression target or classification labels, if applicable.
            Dtype is float if numeric, and object if categorical. If
            ``as_frame`` is True, ``target`` is a pandas object.
        DESCR : str
            The full description of the dataset.
        feature_names : list
            The names of the dataset columns.
        target_names : list
            The names of the target columns.

            .. versionadded:: 0.22

        categories : dict or None
            Maps each categorical feature name to a list of values, such
            that the value encoded as i is ith in the list. If ``as_frame``
            is True, this is None.
        details : dict
            More metadata from OpenML.
        frame : pandas DataFrame
            Only present when `as_frame=True`. DataFrame with ``data`` and
            ``target``.

    (data, target) : tuple if ``return_X_y`` is True

        .. note:: EXPERIMENTAL

            This interface is **experimental** and subsequent releases may
            change attributes without notice (although there should only be
            minor changes to ``data`` and ``target``).

    Missing values in the 'data' are represented as NaN's. Missing values
    in 'target' are represented as NaN's (numerical target) or None
    (categorical target).

    Notes
    -----
    The `"pandas"` and `"liac-arff"` parsers can lead to different data types
    in the output. The notable differences are the following:

    - The `"liac-arff"` parser always encodes categorical features as `str`
      objects. To the contrary, the `"pandas"` parser instead infers the type
      while reading and numerical categories will be cast into integers
      whenever possible.
    - The `"liac-arff"` parser uses float64 to encode numerical features
      tagged as 'REAL' and 'NUMERICAL' in the metadata. The `"pandas"`
      parser instead infers if these numerical features correspond
      to integers and uses pandas' Integer extension dtype.
    - In particular, classification datasets with integer categories are
      typically loaded as such `(0, 1, ...)` with the `"pandas"` parser while
      `"liac-arff"` will force the use of string encoded class labels such as
      `"0"`, `"1"` and so on.
    - The `"pandas"` parser will not strip single quotes - i.e. `'` - from
      string columns. For instance, a string `'my string'` will be kept as is
      while the `"liac-arff"` parser will strip the single quotes. For
      categorical columns, the single quotes are stripped from the values.

    In addition, when `as_frame=False` is used, the `"liac-arff"` parser
    returns ordinally encoded data where the categories are provided in the
    attribute `categories` of the `Bunch` instance. Instead, `"pandas"` returns
    a NumPy array where the categories are not encoded.

    Examples
    --------
    >>> from sklearn.datasets import fetch_openml
    >>> adult = fetch_openml("adult", version=2)  # doctest: +SKIP
    >>> adult.frame.info()  # doctest: +SKIP
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 48842 entries, 0 to 48841
    Data columns (total 15 columns):
     #   Column          Non-Null Count  Dtype
    ---  ------          --------------  -----
     0   age             48842 non-null  int64
     1   workclass       46043 non-null  category
     2   fnlwgt          48842 non-null  int64
     3   education       48842 non-null  category
     4   education-num   48842 non-null  int64
     5   marital-status  48842 non-null  category
     6   occupation      46033 non-null  category
     7   relationship    48842 non-null  category
     8   race            48842 non-null  category
     9   sex             48842 non-null  category
     10  capital-gain    48842 non-null  int64
     11  capital-loss    48842 non-null  int64
     12  hours-per-week  48842 non-null  int64
     13  native-country  47985 non-null  category
     14  class           48842 non-null  category
    dtypes: category(9), int64(6)
    memory usage: 2.7 MB
    """
+
if cache is False:
|
992 |
+
# no caching will be applied
|
993 |
+
data_home = None
|
994 |
+
else:
|
995 |
+
data_home = get_data_home(data_home=data_home)
|
996 |
+
data_home = join(str(data_home), "openml")
|
997 |
+
|
998 |
+
# check valid function arguments. data_id XOR (name, version) should be
|
999 |
+
# provided
|
1000 |
+
if name is not None:
|
1001 |
+
# OpenML is case-insensitive, but the caching mechanism is not
|
1002 |
+
# convert all data names (str) to lower case
|
1003 |
+
name = name.lower()
|
1004 |
+
if data_id is not None:
|
1005 |
+
raise ValueError(
|
1006 |
+
"Dataset data_id={} and name={} passed, but you can only "
|
1007 |
+
"specify a numeric data_id or a name, not "
|
1008 |
+
"both.".format(data_id, name)
|
1009 |
+
)
|
1010 |
+
data_info = _get_data_info_by_name(
|
1011 |
+
name, version, data_home, n_retries=n_retries, delay=delay
|
1012 |
+
)
|
1013 |
+
data_id = data_info["did"]
|
1014 |
+
elif data_id is not None:
|
1015 |
+
# from the previous if statement, it is given that name is None
|
1016 |
+
if version != "active":
|
1017 |
+
raise ValueError(
|
1018 |
+
"Dataset data_id={} and version={} passed, but you can only "
|
1019 |
+
"specify a numeric data_id or a version, not "
|
1020 |
+
"both.".format(data_id, version)
|
1021 |
+
)
|
1022 |
+
else:
|
1023 |
+
raise ValueError(
|
1024 |
+
"Neither name nor data_id are provided. Please provide name or data_id."
|
1025 |
+
)
|
1026 |
+
|
1027 |
+
data_description = _get_data_description_by_id(data_id, data_home)
|
1028 |
+
if data_description["status"] != "active":
|
1029 |
+
warn(
|
1030 |
+
"Version {} of dataset {} is inactive, meaning that issues have "
|
1031 |
+
"been found in the dataset. Try using a newer version from "
|
1032 |
+
"this URL: {}".format(
|
1033 |
+
data_description["version"],
|
1034 |
+
data_description["name"],
|
1035 |
+
data_description["url"],
|
1036 |
+
)
|
1037 |
+
)
|
1038 |
+
if "error" in data_description:
|
1039 |
+
warn(
|
1040 |
+
"OpenML registered a problem with the dataset. It might be "
|
1041 |
+
"unusable. Error: {}".format(data_description["error"])
|
1042 |
+
)
|
1043 |
+
if "warning" in data_description:
|
1044 |
+
warn(
|
1045 |
+
"OpenML raised a warning on the dataset. It might be "
|
1046 |
+
"unusable. Warning: {}".format(data_description["warning"])
|
1047 |
+
)
|
1048 |
+
|
1049 |
+
return_sparse = data_description["format"].lower() == "sparse_arff"
|
1050 |
+
as_frame = not return_sparse if as_frame == "auto" else as_frame
|
1051 |
+
if parser == "auto":
|
1052 |
+
parser_ = "liac-arff" if return_sparse else "pandas"
|
1053 |
+
else:
|
1054 |
+
parser_ = parser
|
1055 |
+
|
1056 |
+
if parser_ == "pandas":
|
1057 |
+
try:
|
1058 |
+
check_pandas_support("`fetch_openml`")
|
1059 |
+
except ImportError as exc:
|
1060 |
+
if as_frame:
|
1061 |
+
err_msg = (
|
1062 |
+
"Returning pandas objects requires pandas to be installed. "
|
1063 |
+
"Alternatively, explicitly set `as_frame=False` and "
|
1064 |
+
"`parser='liac-arff'`."
|
1065 |
+
)
|
1066 |
+
else:
|
1067 |
+
err_msg = (
|
1068 |
+
f"Using `parser={parser!r}` wit dense data requires pandas to be "
|
1069 |
+
"installed. Alternatively, explicitly set `parser='liac-arff'`."
|
1070 |
+
)
|
1071 |
+
raise ImportError(err_msg) from exc
|
1072 |
+
|
1073 |
+
if return_sparse:
|
1074 |
+
if as_frame:
|
1075 |
+
raise ValueError(
|
1076 |
+
"Sparse ARFF datasets cannot be loaded with as_frame=True. "
|
1077 |
+
"Use as_frame=False or as_frame='auto' instead."
|
1078 |
+
)
|
1079 |
+
if parser_ == "pandas":
|
1080 |
+
raise ValueError(
|
1081 |
+
f"Sparse ARFF datasets cannot be loaded with parser={parser!r}. "
|
1082 |
+
"Use parser='liac-arff' or parser='auto' instead."
|
1083 |
+
)
|
1084 |
+
|
1085 |
+
# download data features, meta-info about column types
|
1086 |
+
features_list = _get_data_features(data_id, data_home)
|
1087 |
+
|
1088 |
+
if not as_frame:
|
1089 |
+
for feature in features_list:
|
1090 |
+
if "true" in (feature["is_ignore"], feature["is_row_identifier"]):
|
1091 |
+
continue
|
1092 |
+
if feature["data_type"] == "string":
|
1093 |
+
raise ValueError(
|
1094 |
+
"STRING attributes are not supported for "
|
1095 |
+
"array representation. Try as_frame=True"
|
1096 |
+
)
|
1097 |
+
|
1098 |
+
if target_column == "default-target":
|
1099 |
+
# determines the default target based on the data feature results
|
1100 |
+
# (which is currently more reliable than the data description;
|
1101 |
+
# see issue: https://github.com/openml/OpenML/issues/768)
|
1102 |
+
target_columns = [
|
1103 |
+
feature["name"]
|
1104 |
+
for feature in features_list
|
1105 |
+
if feature["is_target"] == "true"
|
1106 |
+
]
|
1107 |
+
elif isinstance(target_column, str):
|
1108 |
+
# for code-simplicity, make target_column by default a list
|
1109 |
+
target_columns = [target_column]
|
1110 |
+
elif target_column is None:
|
1111 |
+
target_columns = []
|
1112 |
+
else:
|
1113 |
+
# target_column already is of type list
|
1114 |
+
target_columns = target_column
|
1115 |
+
data_columns = _valid_data_column_names(features_list, target_columns)
|
1116 |
+
|
1117 |
+
shape: Optional[Tuple[int, int]]
|
1118 |
+
# determine arff encoding to return
|
1119 |
+
if not return_sparse:
|
1120 |
+
# The shape must include the ignored features to keep the right indexes
|
1121 |
+
# during the arff data conversion.
|
1122 |
+
data_qualities = _get_data_qualities(data_id, data_home)
|
1123 |
+
shape = _get_num_samples(data_qualities), len(features_list)
|
1124 |
+
else:
|
1125 |
+
shape = None
|
1126 |
+
|
1127 |
+
# obtain the data
|
1128 |
+
url = _DATA_FILE.format(data_description["file_id"])
|
1129 |
+
bunch = _download_data_to_bunch(
|
1130 |
+
url,
|
1131 |
+
return_sparse,
|
1132 |
+
data_home,
|
1133 |
+
as_frame=bool(as_frame),
|
1134 |
+
openml_columns_info=features_list,
|
1135 |
+
shape=shape,
|
1136 |
+
target_columns=target_columns,
|
1137 |
+
data_columns=data_columns,
|
1138 |
+
md5_checksum=data_description["md5_checksum"],
|
1139 |
+
n_retries=n_retries,
|
1140 |
+
delay=delay,
|
1141 |
+
parser=parser_,
|
1142 |
+
read_csv_kwargs=read_csv_kwargs,
|
1143 |
+
)
|
1144 |
+
|
1145 |
+
if return_X_y:
|
1146 |
+
return bunch.data, bunch.target
|
1147 |
+
|
1148 |
+
description = "{}\n\nDownloaded from openml.org.".format(
|
1149 |
+
data_description.pop("description")
|
1150 |
+
)
|
1151 |
+
|
1152 |
+
bunch.update(
|
1153 |
+
DESCR=description,
|
1154 |
+
details=data_description,
|
1155 |
+
url="https://www.openml.org/d/{}".format(data_id),
|
1156 |
+
)
|
1157 |
+
|
1158 |
+
return bunch
|
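
# Illustrative sketch (performs a network call, shown for context only): the
# most reproducible way to call `fetch_openml` is by explicit `data_id`, which
# skips the name/version resolution step above.
#
#   X, y = fetch_openml(data_id=61, as_frame=True, return_X_y=True)  # e.g. iris
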
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py
ADDED
@@ -0,0 +1,306 @@
"""RCV1 dataset.

The dataset page is available at

    http://jmlr.csail.mit.edu/papers/volume5/lewis04a/
"""

# Author: Tom Dupre la Tour
# License: BSD 3 clause

import logging
from gzip import GzipFile
from os import PathLike, makedirs, remove
from os.path import exists, join

import joblib
import numpy as np
import scipy.sparse as sp

from ..utils import Bunch
from ..utils import shuffle as shuffle_
from ..utils._param_validation import StrOptions, validate_params
from . import get_data_home
from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr
from ._svmlight_format_io import load_svmlight_files

# The original vectorized data can be found at:
# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt0.dat.gz
# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt1.dat.gz
# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt2.dat.gz
# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt3.dat.gz
# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_train.dat.gz
# while the original stemmed token files can be found
# in the README, section B.12.i.:
# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm
XY_METADATA = (
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976069",
        checksum="ed40f7e418d10484091b059703eeb95ae3199fe042891dcec4be6696b9968374",
        filename="lyrl2004_vectors_test_pt0.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976066",
        checksum="87700668ae45d45d5ca1ef6ae9bd81ab0f5ec88cc95dcef9ae7838f727a13aa6",
        filename="lyrl2004_vectors_test_pt1.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976063",
        checksum="48143ac703cbe33299f7ae9f4995db49a258690f60e5debbff8995c34841c7f5",
        filename="lyrl2004_vectors_test_pt2.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976060",
        checksum="dfcb0d658311481523c6e6ca0c3f5a3e1d3d12cde5d7a8ce629a9006ec7dbb39",
        filename="lyrl2004_vectors_test_pt3.dat.gz",
    ),
    RemoteFileMetadata(
        url="https://ndownloader.figshare.com/files/5976057",
        checksum="5468f656d0ba7a83afc7ad44841cf9a53048a5c083eedc005dcdb5cc768924ae",
        filename="lyrl2004_vectors_train.dat.gz",
    ),
)

# The original data can be found at:
# http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a08-topic-qrels/rcv1-v2.topics.qrels.gz
TOPICS_METADATA = RemoteFileMetadata(
    url="https://ndownloader.figshare.com/files/5976048",
    checksum="2a98e5e5d8b770bded93afc8930d88299474317fe14181aee1466cc754d0d1c1",
    filename="rcv1v2.topics.qrels.gz",
)

logger = logging.getLogger(__name__)

@validate_params(
    {
        "data_home": [str, PathLike, None],
        "subset": [StrOptions({"train", "test", "all"})],
        "download_if_missing": ["boolean"],
        "random_state": ["random_state"],
        "shuffle": ["boolean"],
        "return_X_y": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_rcv1(
    *,
    data_home=None,
    subset="all",
    download_if_missing=True,
    random_state=None,
    shuffle=False,
    return_X_y=False,
):
    """Load the RCV1 multilabel dataset (classification).

    Download it if necessary.

    Version: RCV1-v2, vectors, full sets, topics multilabels.

    =================   =====================
    Classes                               103
    Samples total                      804414
    Dimensionality                      47236
    Features            real, between 0 and 1
    =================   =====================

    Read more in the :ref:`User Guide <rcv1_dataset>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    subset : {'train', 'test', 'all'}, default='all'
        Select the dataset to load: 'train' for the training set
        (23149 samples), 'test' for the test set (781265 samples),
        'all' for both, with the training samples first if shuffle is False.
        This follows the official LYRL2004 chronological split.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    return_X_y : bool, default=False
        If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
        object. See below for more information about the `dataset.data` and
        `dataset.target` objects.

        .. versionadded:: 0.20

    Returns
    -------
    dataset : :class:`~sklearn.utils.Bunch`
        Dictionary-like object. Returned only if `return_X_y` is False.
        `dataset` has the following attributes:

        - data : sparse matrix of shape (804414, 47236), dtype=np.float64
            The array has 0.16% of non zero values. Will be of CSR format.
        - target : sparse matrix of shape (804414, 103), dtype=np.uint8
            Each sample has a value of 1 in its categories, and 0 in others.
            The array has 3.15% of non zero values. Will be of CSR format.
        - sample_id : ndarray of shape (804414,), dtype=np.uint32
            Identification number of each sample, as ordered in dataset.data.
        - target_names : ndarray of shape (103,), dtype=object
            Names of each target (RCV1 topics), as ordered in dataset.target.
        - DESCR : str
            Description of the RCV1 dataset.

    (data, target) : tuple
        A tuple consisting of `dataset.data` and `dataset.target`, as
        described above. Returned only if `return_X_y` is True.

        .. versionadded:: 0.20
    """
|
168 |
+
N_FEATURES = 47236
|
169 |
+
N_CATEGORIES = 103
|
170 |
+
N_TRAIN = 23149
|
171 |
+
|
172 |
+
data_home = get_data_home(data_home=data_home)
|
173 |
+
rcv1_dir = join(data_home, "RCV1")
|
174 |
+
if download_if_missing:
|
175 |
+
if not exists(rcv1_dir):
|
176 |
+
makedirs(rcv1_dir)
|
177 |
+
|
178 |
+
samples_path = _pkl_filepath(rcv1_dir, "samples.pkl")
|
179 |
+
sample_id_path = _pkl_filepath(rcv1_dir, "sample_id.pkl")
|
180 |
+
sample_topics_path = _pkl_filepath(rcv1_dir, "sample_topics.pkl")
|
181 |
+
topics_path = _pkl_filepath(rcv1_dir, "topics_names.pkl")
|
182 |
+
|
183 |
+
# load data (X) and sample_id
|
184 |
+
if download_if_missing and (not exists(samples_path) or not exists(sample_id_path)):
|
185 |
+
files = []
|
186 |
+
for each in XY_METADATA:
|
187 |
+
logger.info("Downloading %s" % each.url)
|
188 |
+
file_path = _fetch_remote(each, dirname=rcv1_dir)
|
189 |
+
files.append(GzipFile(filename=file_path))
|
190 |
+
|
191 |
+
Xy = load_svmlight_files(files, n_features=N_FEATURES)
|
192 |
+
|
193 |
+
# Training data is before testing data
|
194 |
+
X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr()
|
195 |
+
sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7]))
|
196 |
+
sample_id = sample_id.astype(np.uint32, copy=False)
|
197 |
+
|
198 |
+
joblib.dump(X, samples_path, compress=9)
|
199 |
+
joblib.dump(sample_id, sample_id_path, compress=9)
|
200 |
+
|
201 |
+
# delete archives
|
202 |
+
for f in files:
|
203 |
+
f.close()
|
204 |
+
remove(f.name)
|
205 |
+
else:
|
206 |
+
X = joblib.load(samples_path)
|
207 |
+
sample_id = joblib.load(sample_id_path)
|
208 |
+
|
209 |
+
# load target (y), categories, and sample_id_bis
|
210 |
+
if download_if_missing and (
|
211 |
+
not exists(sample_topics_path) or not exists(topics_path)
|
212 |
+
):
|
213 |
+
logger.info("Downloading %s" % TOPICS_METADATA.url)
|
214 |
+
topics_archive_path = _fetch_remote(TOPICS_METADATA, dirname=rcv1_dir)
|
215 |
+
|
216 |
+
# parse the target file
|
217 |
+
n_cat = -1
|
218 |
+
n_doc = -1
|
219 |
+
doc_previous = -1
|
220 |
+
y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8)
|
221 |
+
sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32)
|
222 |
+
category_names = {}
|
223 |
+
with GzipFile(filename=topics_archive_path, mode="rb") as f:
|
224 |
+
for line in f:
|
225 |
+
line_components = line.decode("ascii").split(" ")
|
226 |
+
if len(line_components) == 3:
|
227 |
+
cat, doc, _ = line_components
|
228 |
+
if cat not in category_names:
|
229 |
+
n_cat += 1
|
230 |
+
category_names[cat] = n_cat
|
231 |
+
|
232 |
+
doc = int(doc)
|
233 |
+
if doc != doc_previous:
|
234 |
+
doc_previous = doc
|
235 |
+
n_doc += 1
|
236 |
+
sample_id_bis[n_doc] = doc
|
237 |
+
y[n_doc, category_names[cat]] = 1
|
238 |
+
|
239 |
+
# delete archive
|
240 |
+
remove(topics_archive_path)
|
241 |
+
|
242 |
+
# Samples in X are ordered with sample_id,
|
243 |
+
# whereas in y, they are ordered with sample_id_bis.
|
244 |
+
permutation = _find_permutation(sample_id_bis, sample_id)
|
245 |
+
y = y[permutation, :]
|
246 |
+
|
247 |
+
# save category names in a list, with same order than y
|
248 |
+
categories = np.empty(N_CATEGORIES, dtype=object)
|
249 |
+
for k in category_names.keys():
|
250 |
+
categories[category_names[k]] = k
|
251 |
+
|
252 |
+
# reorder categories in lexicographic order
|
253 |
+
order = np.argsort(categories)
|
254 |
+
categories = categories[order]
|
255 |
+
y = sp.csr_matrix(y[:, order])
|
256 |
+
|
257 |
+
joblib.dump(y, sample_topics_path, compress=9)
|
258 |
+
joblib.dump(categories, topics_path, compress=9)
|
259 |
+
else:
|
260 |
+
y = joblib.load(sample_topics_path)
|
261 |
+
categories = joblib.load(topics_path)
|
262 |
+
|
263 |
+
if subset == "all":
|
264 |
+
pass
|
265 |
+
elif subset == "train":
|
266 |
+
X = X[:N_TRAIN, :]
|
267 |
+
y = y[:N_TRAIN, :]
|
268 |
+
sample_id = sample_id[:N_TRAIN]
|
269 |
+
elif subset == "test":
|
270 |
+
X = X[N_TRAIN:, :]
|
271 |
+
y = y[N_TRAIN:, :]
|
272 |
+
sample_id = sample_id[N_TRAIN:]
|
273 |
+
else:
|
274 |
+
raise ValueError(
|
275 |
+
"Unknown subset parameter. Got '%s' instead of one"
|
276 |
+
" of ('all', 'train', test')" % subset
|
277 |
+
)
|
278 |
+
|
279 |
+
if shuffle:
|
280 |
+
X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state)
|
281 |
+
|
282 |
+
fdescr = load_descr("rcv1.rst")
|
283 |
+
|
284 |
+
if return_X_y:
|
285 |
+
return X, y
|
286 |
+
|
287 |
+
return Bunch(
|
288 |
+
data=X, target=y, sample_id=sample_id, target_names=categories, DESCR=fdescr
|
289 |
+
)
|
290 |
+
|
291 |
+
|
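# Illustrative sketch (triggers a large download on first use, shown for
# context only):
#
#   train = fetch_rcv1(subset="train")
#   train.data.shape    # (23149, 47236), scipy CSR matrix
#   train.target.shape  # (23149, 103), multilabel indicator matrix
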
def _inverse_permutation(p):
    """Inverse permutation p."""
    n = p.size
    s = np.zeros(n, dtype=np.int32)
    i = np.arange(n, dtype=np.int32)
    np.put(s, p, i)  # s[p] = i
    return s


def _find_permutation(a, b):
    """Find the permutation from a to b."""
    t = np.argsort(a)
    u = np.argsort(b)
    u_ = _inverse_permutation(u)
    return t[u_]
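
# Illustrative sketch (toy arrays): `_find_permutation(a, b)` returns indices
# `p` such that `a[p] == b`, which is how the topic labels above are realigned
# to the sample order of X.
#
#   a = np.array([30, 10, 20])
#   b = np.array([10, 20, 30])
#   p = _find_permutation(a, b)  # array([1, 2, 0]); a[p] equals b
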
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py
ADDED
@@ -0,0 +1,2284 @@
"""
Generate samples of synthetic data sets.
"""

# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
#          G. Louppe, J. Nothman
# License: BSD 3 clause

import array
import numbers
import warnings
from collections.abc import Iterable
from numbers import Integral, Real

import numpy as np
import scipy.sparse as sp
from scipy import linalg

from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
from ..utils.random import sample_without_replacement


def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions."""
    if dimensions > 30:
        return np.hstack(
            [
                rng.randint(2, size=(samples, dimensions - 30)),
                _generate_hypercube(samples, 30, rng),
            ]
        )
    out = sample_without_replacement(2**dimensions, samples, random_state=rng).astype(
        dtype=">u4", copy=False
    )
    out = np.unpackbits(out.view(">u1")).reshape((-1, 32))[:, -dimensions:]
    return out

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_informative": [Interval(Integral, 1, None, closed="left")],
        "n_redundant": [Interval(Integral, 0, None, closed="left")],
        "n_repeated": [Interval(Integral, 0, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "n_clusters_per_class": [Interval(Integral, 1, None, closed="left")],
        "weights": ["array-like", None],
        "flip_y": [Interval(Real, 0, 1, closed="both")],
        "class_sep": [Interval(Real, 0, None, closed="neither")],
        "hypercube": ["boolean"],
        "shift": [Interval(Real, None, None, closed="neither"), "array-like", None],
        "scale": [Interval(Real, 0, None, closed="neither"), "array-like", None],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_classification(
    n_samples=100,
    n_features=20,
    *,
    n_informative=2,
    n_redundant=2,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=2,
    weights=None,
    flip_y=0.01,
    class_sep=1.0,
    hypercube=True,
    shift=0.0,
    scale=1.0,
    shuffle=True,
    random_state=None,
):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an ``n_informative``-dimensional hypercube with sides of
    length ``2*class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary ``n_informative`` features, followed by ``n_redundant``
    linear combinations of the informative features, followed by ``n_repeated``
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=20
        The total number of features. These comprise ``n_informative``
        informative features, ``n_redundant`` redundant features,
        ``n_repeated`` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.

    n_informative : int, default=2
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension ``n_informative``. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, default=2
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, default=0
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, default=2
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, default=2
        The number of clusters per class.

    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
            default=None
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than ``n_samples`` samples may be returned if the sum of
        ``weights`` exceeds 1. Note that the actual class proportions will
        not exactly match ``weights`` when ``flip_y`` isn't 0.

    flip_y : float, default=0.01
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder. Note that the default setting flip_y > 0 might lead
        to fewer than ``n_classes`` classes in y in some cases.

    class_sep : float, default=1.0
        The factor multiplying the hypercube size. Larger values spread
        out the clusters/classes and make the classification task easier.

    hypercube : bool, default=True
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, ndarray of shape (n_features,) or None, default=0.0
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, ndarray of shape (n_features,) or None, default=1.0
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : bool, default=True
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for class membership of each sample.

    See Also
    --------
    make_blobs : Simplified variant.
    make_multilabel_classification : Unrelated generator for multilabel tasks.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(random_state=42)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [0, 0, 1, 1, 0]
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError(
            "Number of informative, redundant and repeated "
            "features must sum to less than the total number"
            " of features"
        )
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller than or equal to 2**n_informative({})={}"
        raise ValueError(
            msg.format(
                n_classes, n_clusters_per_class, n_informative, 2**n_informative
            )
        )

    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError(
                "Weights specified but incompatible with number of classes."
            )
        if len(weights) == n_classes - 1:
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)
    ]

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(
        float, copy=False
    )
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.uniform(size=(n_clusters, 1))
        centroids *= generator.uniform(size=(1, n_informative))

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative))

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1
        X[:, n_informative : n_informative + n_redundant] = np.dot(
            X[:, :n_informative], B
        )

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp)
        X[:, n : n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.standard_normal(size=(n_samples, n_useless))

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.uniform(size=n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.uniform(size=n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.uniform(size=n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y

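# --- Usage sketch (not part of _samples_generator.py) ---
# Demonstrates the column layout documented above: with shuffle=False the
# informative, redundant and repeated features occupy the leading columns.
from sklearn.datasets import make_classification

X, y = make_classification(
    n_samples=200,
    n_features=10,
    n_informative=3,
    n_redundant=2,
    n_repeated=1,
    shuffle=False,
    random_state=0,
)
X_useful = X[:, :6]  # 3 informative + 2 redundant + 1 repeated
X_noise = X[:, 6:]   # the remaining 4 columns are pure N(0, 1) noise
print(X_useful.shape, X_noise.shape)  # (200, 6) (200, 4)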
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "n_labels": [Interval(Integral, 0, None, closed="left")],
        "length": [Interval(Integral, 1, None, closed="left")],
        "allow_unlabeled": ["boolean"],
        "sparse": ["boolean"],
        "return_indicator": [StrOptions({"dense", "sparse"}), "boolean"],
        "return_distributions": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_multilabel_classification(
    n_samples=100,
    n_features=20,
    *,
    n_classes=5,
    n_labels=2,
    length=50,
    allow_unlabeled=True,
    sparse=False,
    return_indicator="dense",
    return_distributions=False,
    random_state=None,
):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=20
        The total number of features.

    n_classes : int, default=5
        The number of classes of the classification problem.

    n_labels : int, default=2
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.

    length : int, default=50
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.

    allow_unlabeled : bool, default=True
        If ``True``, some instances might not belong to any class.

    sparse : bool, default=False
        If ``True``, return a sparse feature matrix.

        .. versionadded:: 0.17
           parameter to allow *sparse* output.

    return_indicator : {'dense', 'sparse'} or False, default='dense'
        If ``'dense'`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.

    return_distributions : bool, default=False
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        The label sets. Sparse matrix should be of CSR format.

    p_c : ndarray of shape (n_classes,)
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.

    p_w_c : ndarray of shape (n_features, n_classes)
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.

    Examples
    --------
    >>> from sklearn.datasets import make_multilabel_classification
    >>> X, y = make_multilabel_classification(n_labels=3, random_state=42)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100, 5)
    >>> list(y[:3])
    [array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])]
    """

    generator = check_random_state(random_state)
    p_c = generator.uniform(size=n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    p_w_c = generator.uniform(size=(n_features, n_classes))
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)

        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c, generator.uniform(size=y_size - len(y)))
            y.update(c)
        y = list(y)

        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)

        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y

        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.uniform(size=n_words))
        return words, y

    X_indices = array.array("i")
    X_indptr = array.array("i", [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features))
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()

    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, "sparse", "dense"):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == "sparse"))
        Y = lb.fit([range(n_classes)]).transform(Y)
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y

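# --- Usage sketch (not part of _samples_generator.py) ---
# With return_distributions=True the generator also hands back the class
# prior p_c and the per-class word distributions p_w_c used in the
# generative process above.
from sklearn.datasets import make_multilabel_classification

X, Y, p_c, p_w_c = make_multilabel_classification(
    n_samples=50, n_features=20, n_classes=5,
    return_distributions=True, random_state=0,
)
print(Y.shape)             # (50, 5): dense binary indicator matrix
print(round(p_c.sum(), 6)) # 1.0: the class prior is a proper distribution
print(p_w_c.sum(axis=0))   # each column (one class's word distribution) sums to 1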
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_hastie_10_2(n_samples=12000, *, random_state=None):
    """Generate data for binary classification used in Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::

        y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=12000
        The number of samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 10)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    See Also
    --------
    make_gaussian_quantiles : A generalization of this dataset approach.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.
    """
    rs = check_random_state(random_state)

    shape = (n_samples, 10)
    X = rs.normal(size=shape).reshape(shape)
    y = ((X**2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False)
    y[y == 0.0] = -1.0

    return X, y

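# --- Usage sketch (not part of _samples_generator.py) ---
# The 9.34 threshold is approximately the median of a chi-squared variable
# with 10 degrees of freedom (sum of 10 squared standard normals), which is
# why the two classes come out roughly balanced.
import numpy as np
from scipy.stats import chi2
from sklearn.datasets import make_hastie_10_2

print(chi2(df=10).median())       # ~9.34
X, y = make_hastie_10_2(n_samples=12000, random_state=0)
print(round(np.mean(y == 1), 2))  # ~0.5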
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_informative": [Interval(Integral, 0, None, closed="left")],
        "n_targets": [Interval(Integral, 1, None, closed="left")],
        "bias": [Interval(Real, None, None, closed="neither")],
        "effective_rank": [Interval(Integral, 1, None, closed="left"), None],
        "tail_strength": [Interval(Real, 0, 1, closed="both")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "shuffle": ["boolean"],
        "coef": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_regression(
    n_samples=100,
    n_features=100,
    *,
    n_informative=10,
    n_targets=1,
    bias=0.0,
    effective_rank=None,
    tail_strength=0.5,
    noise=0.0,
    shuffle=True,
    coef=False,
    random_state=None,
):
    """Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
    more details.

    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=100
        The number of features.

    n_informative : int, default=10
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.

    n_targets : int, default=1
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.

    bias : float, default=0.0
        The bias term in the underlying linear model.

    effective_rank : int, default=None
        If not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        If None:
            The input set is well conditioned, centered and gaussian with
            unit variance.

    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None. When a float, it should be
        between 0 and 1.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    shuffle : bool, default=True
        Shuffle the samples and the features.

    coef : bool, default=False
        If True, the coefficients of the underlying linear model are returned.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,) or (n_samples, n_targets)
        The output values.

    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        The coefficient of the underlying linear model. It is returned only if
        coef is True.

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42)
    >>> X
    array([[ 0.4967..., -0.1382... ],
           [ 0.6476...,  1.523...],
           [-0.2341..., -0.2341...],
           [-0.4694...,  0.5425...],
           [ 1.579...,  0.7674...]])
    >>> y
    array([  6.737...,  37.79..., -10.27...,   0.4017...,  42.22...])
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Randomly generate a well conditioned input set
        X = generator.standard_normal(size=(n_samples, n_features))

    else:
        # Randomly generate a low rank, fat tail input set
        X = make_low_rank_matrix(
            n_samples=n_samples,
            n_features=n_features,
            effective_rank=effective_rank,
            tail_strength=tail_strength,
            random_state=generator,
        )

    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by sparsifying regularizers such as L1 or elastic net)
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * generator.uniform(
        size=(n_informative, n_targets)
    )

    y = np.dot(X, ground_truth) + bias

    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        ground_truth = ground_truth[indices]

    y = np.squeeze(y)

    if coef:
        return X, y, np.squeeze(ground_truth)

    else:
        return X, y

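# --- Usage sketch (not part of _samples_generator.py) ---
# coef=True exposes the ground-truth weights, so the linear model can be
# checked directly; with noise=0.0 the relation y = X @ w + bias is exact.
import numpy as np
from sklearn.datasets import make_regression

X, y, w = make_regression(
    n_samples=50, n_features=8, n_informative=3,
    bias=2.0, noise=0.0, coef=True, random_state=0,
)
print(np.allclose(y, X @ w + 2.0))  # True
print(np.count_nonzero(w))          # 3 informative coefficients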
@validate_params(
    {
        "n_samples": [Interval(Integral, 0, None, closed="left"), tuple],
        "shuffle": ["boolean"],
        "noise": [Interval(Real, 0, None, closed="left"), None],
        "random_state": ["random_state"],
        "factor": [Interval(Real, 0, 1, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def make_circles(
    n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8
):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, it is the total number of points generated.
        For odd numbers, the inner circle will have one point more than the
        outer circle.
        If two-element tuple, number of points in outer circle and inner
        circle.

        .. versionchanged:: 0.23
           Added two-element tuple.

    shuffle : bool, default=True
        Whether to shuffle the samples.

    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    factor : float, default=.8
        Scale factor between inner and outer circle in the range `[0, 1)`.

    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets import make_circles
    >>> X, y = make_circles(random_state=42)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [1, 1, 1, 0, 0]
    """
    if isinstance(n_samples, numbers.Integral):
        n_samples_out = n_samples // 2
        n_samples_in = n_samples - n_samples_out
    else:  # n_samples is a tuple
        if len(n_samples) != 2:
            raise ValueError("When a tuple, n_samples must have exactly two elements.")
        n_samples_out, n_samples_in = n_samples

    generator = check_random_state(random_state)
    # so as not to have the first point = last point, we set endpoint=False
    linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
    linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
    outer_circ_x = np.cos(linspace_out)
    outer_circ_y = np.sin(linspace_out)
    inner_circ_x = np.cos(linspace_in) * factor
    inner_circ_y = np.sin(linspace_in) * factor

    X = np.vstack(
        [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]
    ).T
    y = np.hstack(
        [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]
    )
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y

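# --- Usage sketch (not part of _samples_generator.py) ---
# A two-element tuple splits the points explicitly between the outer circle
# (label 0) and the inner circle (label 1); factor sets the inner radius.
from sklearn.datasets import make_circles

X, y = make_circles(n_samples=(70, 30), factor=0.3, noise=0.05, random_state=0)
print((y == 0).sum(), (y == 1).sum())  # 70 30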
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left"), tuple],
        "shuffle": ["boolean"],
        "noise": [Interval(Real, 0, None, closed="left"), None],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.

    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, the total number of points generated.
        If two-element tuple, number of points in each of two moons.

        .. versionchanged:: 0.23
           Added two-element tuple.

    shuffle : bool, default=True
        Whether to shuffle the samples.

    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.
    """

    if isinstance(n_samples, numbers.Integral):
        n_samples_out = n_samples // 2
        n_samples_in = n_samples - n_samples_out
    else:
        try:
            n_samples_out, n_samples_in = n_samples
        except ValueError as e:
            raise ValueError(
                "`n_samples` can be either an int or a two-element tuple."
            ) from e

    generator = check_random_state(random_state)

    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5

    X = np.vstack(
        [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]
    ).T
    y = np.hstack(
        [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]
    )

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y

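# --- Usage sketch (not part of _samples_generator.py) ---
# Basic call: two interleaving half-circles in 2d; noise controls how much
# the two moons overlap.
from sklearn.datasets import make_moons

X, y = make_moons(n_samples=200, noise=0.1, random_state=0)
print(X.shape, sorted(set(y)))  # (200, 2) [0, 1]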
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left"), "array-like"],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "centers": [Interval(Integral, 1, None, closed="left"), "array-like", None],
        "cluster_std": [Interval(Real, 0, None, closed="left"), "array-like"],
        "center_box": [tuple],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
        "return_centers": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_blobs(
    n_samples=100,
    n_features=2,
    *,
    centers=None,
    cluster_std=1.0,
    center_box=(-10.0, 10.0),
    shuffle=True,
    random_state=None,
    return_centers=False,
):
    """Generate isotropic Gaussian blobs for clustering.

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or array-like, default=100
        If int, it is the total number of points equally divided among
        clusters.
        If array-like, each element of the sequence indicates
        the number of samples per cluster.

        .. versionchanged:: v0.20
           one can now pass an array-like to the ``n_samples`` parameter

    n_features : int, default=2
        The number of features for each sample.

    centers : int or array-like of shape (n_centers, n_features), default=None
        The number of centers to generate, or the fixed center locations.
        If n_samples is an int and centers is None, 3 centers are generated.
        If n_samples is array-like, centers must be
        either None or an array of length equal to the length of n_samples.

    cluster_std : float or array-like of float, default=1.0
        The standard deviation of the clusters.

    center_box : tuple of float (min, max), default=(-10.0, 10.0)
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    return_centers : bool, default=False
        If True, then return the centers of each cluster.

        .. versionadded:: 0.23

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for cluster membership of each sample.

    centers : ndarray of shape (n_centers, n_features)
        The centers of each cluster. Only returned if
        ``return_centers=True``.

    See Also
    --------
    make_classification : A more intricate variant.

    Examples
    --------
    >>> from sklearn.datasets import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
    """
    generator = check_random_state(random_state)

    if isinstance(n_samples, numbers.Integral):
        # Set n_centers by looking at centers arg
        if centers is None:
            centers = 3

        if isinstance(centers, numbers.Integral):
            n_centers = centers
            centers = generator.uniform(
                center_box[0], center_box[1], size=(n_centers, n_features)
            )

        else:
            centers = check_array(centers)
            n_features = centers.shape[1]
            n_centers = centers.shape[0]

    else:
        # Set n_centers by looking at [n_samples] arg
        n_centers = len(n_samples)
        if centers is None:
            centers = generator.uniform(
                center_box[0], center_box[1], size=(n_centers, n_features)
            )
        if not isinstance(centers, Iterable):
            raise ValueError(
                "Parameter `centers` must be array-like. Got {!r} instead".format(
                    centers
                )
            )
        if len(centers) != n_centers:
            raise ValueError(
                "Length of `n_samples` not consistent with number of "
                f"centers. Got n_samples = {n_samples} and centers = {centers}"
            )
        centers = check_array(centers)
        n_features = centers.shape[1]

    # stds: if cluster_std is given as list, it must be consistent
    # with the n_centers
    if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers:
        raise ValueError(
            "Length of `cluster_std` not consistent with "
            "number of centers. Got centers = {} "
            "and cluster_std = {}".format(centers, cluster_std)
        )

    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.full(len(centers), cluster_std)

    if isinstance(n_samples, Iterable):
        n_samples_per_center = n_samples
    else:
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers

        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1

    cum_sum_n_samples = np.cumsum(n_samples_per_center)
    X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64)
    y = np.empty(shape=(sum(n_samples_per_center),), dtype=int)

    for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
        start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0
        end_idx = cum_sum_n_samples[i]
        X[start_idx:end_idx] = generator.normal(
            loc=centers[i], scale=std, size=(n, n_features)
        )
        y[start_idx:end_idx] = i

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if return_centers:
        return X, y, centers
    else:
        return X, y

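# --- Usage sketch (not part of _samples_generator.py) ---
# An array-like n_samples fixes each cluster's size, and return_centers=True
# also returns the sampled (or user-supplied) centroids.
from sklearn.datasets import make_blobs

X, y, centers = make_blobs(
    n_samples=[20, 30, 50], cluster_std=[0.5, 1.0, 2.0],
    return_centers=True, random_state=0,
)
print(X.shape, centers.shape)  # (100, 2) (3, 2)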
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 5, None, closed="left")],
        "noise": [Interval(Real, 0.0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only 5 are actually used to compute
    `y`. The remaining features are independent of `y`.

    The number of features has to be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=10
        The number of features. Should be at least 5.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> X, y = make_friedman1(random_state=42)
    >>> X.shape
    (100, 10)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [16.8..., 5.8..., 9.4...]
    """
    generator = check_random_state(random_state)

    X = generator.uniform(size=(n_samples, n_features))
    y = (
        10 * np.sin(np.pi * X[:, 0] * X[:, 1])
        + 20 * (X[:, 2] - 0.5) ** 2
        + 10 * X[:, 3]
        + 5 * X[:, 4]
        + noise * generator.standard_normal(size=(n_samples))
    )

    return X, y

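# --- Usage sketch (not part of _samples_generator.py) ---
# With noise=0.0 the target depends only on the first five columns, so it
# can be recomputed exactly from the documented formula.
import numpy as np
from sklearn.datasets import make_friedman1

X, y = make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=0)
y_ref = (
    10 * np.sin(np.pi * X[:, 0] * X[:, 1])
    + 20 * (X[:, 2] - 0.5) ** 2
    + 10 * X[:, 3]
    + 5 * X[:, 4]
)
print(np.allclose(y, y_ref))  # True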
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman2(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
                   - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> X, y = make_friedman2(random_state=42)
    >>> X.shape
    (100, 4)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [1229.4..., 27.0..., 65.6...]
    """
    generator = check_random_state(random_state)

    X = generator.uniform(size=(n_samples, 4))
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    y = (
        X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2
    ) ** 0.5 + noise * generator.standard_normal(size=(n_samples))

    return X, y

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman3(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
 / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman3
    >>> X, y = make_friedman3(random_state=42)
    >>> X.shape
    (100, 4)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [1.5..., 0.9..., 0.4...]
    """
    generator = check_random_state(random_state)

    X = generator.uniform(size=(n_samples, 4))
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    y = np.arctan(
        (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    ) + noise * generator.standard_normal(size=(n_samples))

    return X, y

@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "effective_rank": [Interval(Integral, 1, None, closed="left")],
        "tail_strength": [Interval(Real, 0, 1, closed="both")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_low_rank_matrix(
    n_samples=100,
    n_features=100,
    *,
    effective_rank=10,
    tail_strength=0.5,
    random_state=None,
):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profile is often seen in practice, for instance:
     - gray level pictures of faces
     - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=100
        The number of features.

    effective_rank : int, default=10
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile. The value should be between 0 and 1.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random (orthonormal) vectors
    u, _ = linalg.qr(
        generator.standard_normal(size=(n_samples, n)),
        mode="economic",
        check_finite=False,
    )
    v, _ = linalg.qr(
        generator.standard_normal(size=(n_features, n)),
        mode="economic",
        check_finite=False,
    )

    # Index of the singular values
    singular_ind = np.arange(n, dtype=np.float64)

    # Build the singular profile by assembling signal and noise components
    low_rank = (1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = np.identity(n) * (low_rank + tail)

    return np.dot(np.dot(u, s), v.T)

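# Illustrative sketch, not part of the upstream file: a quick check of the
# returned shape under the bell-shaped singular-value profile described above.
# The sizes and random_state below are arbitrary choices for the example.
# >>> from sklearn.datasets import make_low_rank_matrix
# >>> X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
# ...                          random_state=0)
# >>> X.shape
# (50, 25)
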
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
        "data_transposed": ["boolean", Hidden(StrOptions({"deprecated"}))],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_coded_signal(
    n_samples,
    *,
    n_components,
    n_features,
    n_nonzero_coefs,
    random_state=None,
    data_transposed="deprecated",
):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix `Y = DX`, such that `D` is of shape `(n_features, n_components)`,
    `X` is of shape `(n_components, n_samples)` and each column of `X` has exactly
    `n_nonzero_coefs` non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        Number of samples to generate.

    n_components : int
        Number of components in the dictionary.

    n_features : int
        Number of features of the dataset to generate.

    n_nonzero_coefs : int
        Number of active (non-zero) coefficients in each sample.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    data_transposed : bool, default=False
        By default, Y, D and X are not transposed.

        .. versionadded:: 1.1

        .. versionchanged:: 1.3
            Default value changed from True to False.

        .. deprecated:: 1.3
            `data_transposed` is deprecated and will be removed in 1.5.

    Returns
    -------
    data : ndarray of shape (n_features, n_samples) or (n_samples, n_features)
        The encoded signal (Y). The shape is `(n_samples, n_features)` if
        `data_transposed` is False, otherwise it's `(n_features, n_samples)`.

    dictionary : ndarray of shape (n_features, n_components) or \
            (n_components, n_features)
        The dictionary with normalized components (D). The shape is
        `(n_components, n_features)` if `data_transposed` is False, otherwise it's
        `(n_features, n_components)`.

    code : ndarray of shape (n_components, n_samples) or (n_samples, n_components)
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X). The shape is `(n_samples, n_components)`
        if `data_transposed` is False, otherwise it's `(n_components, n_samples)`.
    """
    generator = check_random_state(random_state)

    # generate dictionary
    D = generator.standard_normal(size=(n_features, n_components))
    D /= np.sqrt(np.sum((D**2), axis=0))

    # generate code
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.standard_normal(size=n_nonzero_coefs)

    # encode signal
    Y = np.dot(D, X)

    # TODO(1.5) remove data_transposed
    # raise warning if data_transposed is not passed explicitly
    if data_transposed != "deprecated":
        warnings.warn(
            "data_transposed was deprecated in version 1.3 and will be removed in 1.5.",
            FutureWarning,
        )
    else:
        data_transposed = False

    # transpose if needed
    if not data_transposed:
        Y, D, X = Y.T, D.T, X.T

    return map(np.squeeze, (Y, D, X))

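# Illustrative sketch, not part of the upstream file: with the current default
# (data_transposed left at its deprecated sentinel, i.e. effectively False),
# the three return values come back sample-major, and every row of the code
# array has exactly n_nonzero_coefs non-zero entries. Sizes below are
# arbitrary.
# >>> import numpy as np
# >>> from sklearn.datasets import make_sparse_coded_signal
# >>> Y, D, X = make_sparse_coded_signal(
# ...     n_samples=5, n_components=8, n_features=6, n_nonzero_coefs=3,
# ...     random_state=0)
# >>> Y.shape, D.shape, X.shape
# ((5, 6), (8, 6), (5, 8))
# >>> bool((np.count_nonzero(X, axis=1) == 3).all())
# True
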
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_uncorrelated(n_samples=100, n_features=10, *, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    This dataset is described in Celeux et al. [1] as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=10
        The number of features.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    generator = check_random_state(random_state)

    X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
    y = generator.normal(
        loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]),
        scale=np.ones(n_samples),
    )

    return X, y

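# Illustrative sketch, not part of the upstream file: only the first four
# columns carry signal, so a fitted linear model should put most of its weight
# there. The call below just confirms the output shapes; sizes are arbitrary.
# >>> from sklearn.datasets import make_sparse_uncorrelated
# >>> X, y = make_sparse_uncorrelated(n_samples=200, n_features=10,
# ...                                 random_state=0)
# >>> X.shape, y.shape
# ((200, 10), (200,))
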
@validate_params(
    {
        "n_dim": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_spd_matrix(n_dim, *, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_dim, n_dim)
        The random symmetric, positive-definite matrix.

    See Also
    --------
    make_sparse_spd_matrix: Generate a sparse symmetric definite positive matrix.

    Examples
    --------
    >>> from sklearn.datasets import make_spd_matrix
    >>> make_spd_matrix(n_dim=2, random_state=42)
    array([[2.09..., 0.34...],
           [0.34..., 0.21...]])
    """
    generator = check_random_state(random_state)

    A = generator.uniform(size=(n_dim, n_dim))
    U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False)
    X = np.dot(np.dot(U, 1.0 + np.diag(generator.uniform(size=n_dim))), Vt)

    return X

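# Illustrative sketch, not part of the upstream file: by construction the
# result is symmetric with strictly positive eigenvalues, which the checks
# below confirm for an arbitrary seed.
# >>> import numpy as np
# >>> from sklearn.datasets import make_spd_matrix
# >>> X = make_spd_matrix(n_dim=5, random_state=0)
# >>> bool(np.allclose(X, X.T))
# True
# >>> bool((np.linalg.eigvalsh(X) > 0).all())
# True
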
@validate_params(
    {
        "n_dim": [Hidden(None), Interval(Integral, 1, None, closed="left")],
        "alpha": [Interval(Real, 0, 1, closed="both")],
        "norm_diag": ["boolean"],
        "smallest_coef": [Interval(Real, 0, 1, closed="both")],
        "largest_coef": [Interval(Real, 0, 1, closed="both")],
        "sparse_format": [
            StrOptions({"bsr", "coo", "csc", "csr", "dia", "dok", "lil"}),
            None,
        ],
        "random_state": ["random_state"],
        "dim": [
            Interval(Integral, 1, None, closed="left"),
            Hidden(StrOptions({"deprecated"})),
        ],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_spd_matrix(
    n_dim=None,
    *,
    alpha=0.95,
    norm_diag=False,
    smallest_coef=0.1,
    largest_coef=0.9,
    sparse_format=None,
    random_state=None,
    dim="deprecated",
):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int, default=1
        The size of the random matrix to generate.

        .. versionchanged:: 1.4
            Renamed from ``dim`` to ``n_dim``.

    alpha : float, default=0.95
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity. The value should be in the range [0, 1].

    norm_diag : bool, default=False
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1.

    smallest_coef : float, default=0.1
        The value of the smallest coefficient between 0 and 1.

    largest_coef : float, default=0.9
        The value of the largest coefficient between 0 and 1.

    sparse_format : str, default=None
        String representing the output sparse format, such as 'csc', 'csr', etc.
        If ``None``, return a dense numpy ndarray.

        .. versionadded:: 1.4

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    dim : int, default=1
        The size of the random matrix to generate.

        .. deprecated:: 1.4
            `dim` is deprecated and will be removed in 1.6.

    Returns
    -------
    prec : ndarray or sparse matrix of shape (dim, dim)
        The generated matrix. If ``sparse_format=None``, this would be an ndarray.
        Otherwise, this will be a sparse matrix of the specified format.

    See Also
    --------
    make_spd_matrix : Generate a random symmetric, positive-definite matrix.

    Notes
    -----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    Examples
    --------
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42)
    array([[1., 0., 0., 0.],
           [0., 1., 0., 0.],
           [0., 0., 1., 0.],
           [0., 0., 0., 1.]])
    """
    random_state = check_random_state(random_state)

    # TODO(1.6): remove in 1.6
    # Also make sure to change `n_dim` default back to 1 and deprecate None
    if n_dim is not None and dim != "deprecated":
        raise ValueError(
            "`dim` and `n_dim` cannot be both specified. Please use `n_dim` only "
            "as `dim` is deprecated in v1.4 and will be removed in v1.6."
        )

    if dim != "deprecated":
        warnings.warn(
            (
                "dim was deprecated in version 1.4 and will be removed in 1.6. "
                "Please use ``n_dim`` instead."
            ),
            FutureWarning,
        )
        _n_dim = dim
    elif n_dim is None:
        _n_dim = 1
    else:
        _n_dim = n_dim

    chol = -sp.eye(_n_dim)
    aux = sp.random(
        m=_n_dim,
        n=_n_dim,
        density=1 - alpha,
        data_rvs=lambda x: random_state.uniform(
            low=smallest_coef, high=largest_coef, size=x
        ),
        random_state=random_state,
    )
    # We need to avoid "coo" format because it does not support slicing
    aux = sp.tril(aux, k=-1, format="csc")

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(_n_dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    prec = chol.T @ chol

    if norm_diag:
        # Scale symmetrically by the inverse square roots of the diagonal so
        # that all diagonal entries of the result equal 1
        d = sp.diags(1.0 / np.sqrt(prec.diagonal()))
        prec = d @ prec @ d

    if sparse_format is None:
        return prec.toarray()
    else:
        return prec.asformat(sparse_format)

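# Illustrative sketch, not part of the upstream file: requesting a sparse
# output format (added in 1.4) yields a SciPy sparse matrix instead of a dense
# ndarray; alpha close to 1 keeps the Cholesky factor, and hence the result,
# sparse. Values below are arbitrary.
# >>> from sklearn.datasets import make_sparse_spd_matrix
# >>> prec = make_sparse_spd_matrix(n_dim=10, alpha=0.9, sparse_format="csr",
# ...                               random_state=0)
# >>> prec.shape, prec.format
# ((10, 10), 'csr')
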
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
        "hole": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None, hole=False):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the Swiss Roll.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    hole : bool, default=False
        If True, generates the swiss roll with hole dataset.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", 2nd edition,
           Chapter 6, 2014.
           https://homepages.ecs.vuw.ac.nz/~marslast/Code/Ch6/lle.py
    """
    generator = check_random_state(random_state)

    if not hole:
        t = 1.5 * np.pi * (1 + 2 * generator.uniform(size=n_samples))
        y = 21 * generator.uniform(size=n_samples)
    else:
        corners = np.array(
            [[np.pi * (1.5 + i), j * 7] for i in range(3) for j in range(3)]
        )
        corners = np.delete(corners, 4, axis=0)
        corner_index = generator.choice(8, n_samples)
        parameters = generator.uniform(size=(2, n_samples)) * np.array([[np.pi], [7]])
        t, y = corners[corner_index].T + parameters

    x = t * np.cos(t)
    z = t * np.sin(t)

    X = np.vstack((x, y, z))
    X += noise * generator.standard_normal(size=(3, n_samples))
    X = X.T
    t = np.squeeze(t)

    return X, t

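# Illustrative sketch, not part of the upstream file: the generator returns a
# 3-D point cloud plus the unrolled coordinate t, which is what manifold
# learners try to recover. Sizes and noise level below are arbitrary.
# >>> from sklearn.datasets import make_swiss_roll
# >>> X, t = make_swiss_roll(n_samples=100, noise=0.05, random_state=0)
# >>> X.shape, t.shape
# ((100, 3), (100,))
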
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_s_curve(n_samples=100, *, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the S curve.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    generator = check_random_state(random_state)

    t = 3 * np.pi * (generator.uniform(size=(1, n_samples)) - 0.5)
    X = np.empty(shape=(n_samples, 3), dtype=np.float64)
    X[:, 0] = np.sin(t)
    X[:, 1] = 2.0 * generator.uniform(size=n_samples)
    X[:, 2] = np.sign(t) * (np.cos(t) - 1)
    X += noise * generator.standard_normal(size=(3, n_samples)).T
    t = np.squeeze(t)

    return X, t

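# Illustrative sketch, not part of the upstream file: same contract as
# make_swiss_roll, a 3-D embedding plus the 1-D manifold coordinate.
# >>> from sklearn.datasets import make_s_curve
# >>> X, t = make_s_curve(n_samples=100, random_state=0)
# >>> X.shape, t.shape
# ((100, 3), (100,))
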
@validate_params(
    {
        "mean": ["array-like", None],
        "cov": [Interval(Real, 0, None, closed="left")],
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_gaussian_quantiles(
    *,
    mean=None,
    cov=1.0,
    n_samples=100,
    n_features=2,
    n_classes=3,
    shuffle=True,
    random_state=None,
):
    r"""Generate isotropic Gaussian and label samples by quantile.

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array-like of shape (n_features,), default=None
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, default=1.0
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, default=100
        The total number of points equally divided among classes.

    n_features : int, default=2
        The number of features for each sample.

    n_classes : int, default=3
        The number of classes.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al. [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.

    Examples
    --------
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(random_state=42)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [2, 0, 1, 0, 2]
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    if mean is None:
        mean = np.zeros(n_features)
    else:
        mean = np.array(mean)

    # Build multivariate normal distribution
    X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,))

    # Sort by distance from origin
    idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[idx, :]

    # Label by quantile
    step = n_samples // n_classes

    y = np.hstack(
        [
            np.repeat(np.arange(n_classes), step),
            np.repeat(n_classes - 1, n_samples - step * n_classes),
        ]
    )

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y

def _shuffle(data, random_state=None):
    generator = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_idx = generator.permutation(n_rows)
    col_idx = generator.permutation(n_cols)
    result = data[row_idx][:, col_idx]
    return result, row_idx, col_idx

@validate_params(
    {
        "shape": [tuple],
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "minval": [Interval(Real, None, None, closed="neither")],
        "maxval": [Interval(Real, None, None, closed="neither")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_biclusters(
    shape,
    n_clusters,
    *,
    noise=0.0,
    minval=10,
    maxval=100,
    shuffle=True,
    random_state=None,
):
    """Generate a constant block diagonal structure array for biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : tuple of shape (n_rows, n_cols)
        The shape of the result.

    n_clusters : int
        The number of biclusters.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    minval : float, default=10
        Minimum value of a bicluster.

    maxval : float, default=100
        Maximum value of a bicluster.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape `shape`
        The generated array.

    rows : ndarray of shape (n_clusters, X.shape[0])
        The indicators for cluster membership of each row.

    cols : ndarray of shape (n_clusters, X.shape[1])
        The indicators for cluster membership of each column.

    See Also
    --------
    make_checkerboard: Generate an array with block checkerboard structure for
        biclustering.

    References
    ----------

    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters))
    col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters))

    row_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes)]
    )
    col_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes)]
    )

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols

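# Illustrative sketch, not part of the upstream file: the row/column indicator
# arrays have one boolean row per bicluster, which is the format expected by
# the consensus_score metric. Sizes below are arbitrary.
# >>> from sklearn.datasets import make_biclusters
# >>> X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
# ...                                 random_state=0)
# >>> X.shape, rows.shape, cols.shape
# ((30, 20), (3, 30), (3, 20))
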
@validate_params(
    {
        "shape": [tuple],
        "n_clusters": [Interval(Integral, 1, None, closed="left"), "array-like"],
        "noise": [Interval(Real, 0, None, closed="left")],
        "minval": [Interval(Real, None, None, closed="neither")],
        "maxval": [Interval(Real, None, None, closed="neither")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_checkerboard(
    shape,
    n_clusters,
    *,
    noise=0.0,
    minval=10,
    maxval=100,
    shuffle=True,
    random_state=None,
):
    """Generate an array with block checkerboard structure for biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : tuple of shape (n_rows, n_cols)
        The shape of the result.

    n_clusters : int or array-like of shape (n_row_clusters, n_column_clusters)
        The number of row and column clusters.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    minval : float, default=10
        Minimum value of a bicluster.

    maxval : float, default=100
        Maximum value of a bicluster.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape `shape`
        The generated array.

    rows : ndarray of shape (n_clusters, X.shape[0])
        The indicators for cluster membership of each row.

    cols : ndarray of shape (n_clusters, X.shape[1])
        The indicators for cluster membership of each column.

    See Also
    --------
    make_biclusters : Generate an array with constant block diagonal structure
        for biclustering.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(
        n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)
    )
    col_sizes = generator.multinomial(
        n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)
    )

    row_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes)]
    )
    col_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes)]
    )

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    rows = np.vstack(
        [
            row_labels == label
            for label in range(n_row_clusters)
            for _ in range(n_col_clusters)
        ]
    )
    cols = np.vstack(
        [
            col_labels == label
            for _ in range(n_row_clusters)
            for label in range(n_col_clusters)
        ]
    )

    return result, rows, cols
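# Illustrative sketch, not part of the upstream file: with (n_row_clusters,
# n_column_clusters) = (4, 3) there are 12 biclusters in total, so the
# indicator arrays have 12 rows each. Sizes below are arbitrary.
# >>> from sklearn.datasets import make_checkerboard
# >>> X, rows, cols = make_checkerboard(shape=(30, 20), n_clusters=(4, 3),
# ...                                   random_state=0)
# >>> X.shape, rows.shape, cols.shape
# ((30, 20), (12, 30), (12, 20))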
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py
ADDED
@@ -0,0 +1,273 @@
"""
=============================
Species distribution dataset
=============================

This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).

The two species are:

 - `"Bradypus variegatus"
   <http://www.iucnredlist.org/details/3038/0>`_ ,
   the Brown-throated Sloth.

 - `"Microryzomys minutus"
   <http://www.iucnredlist.org/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.

References
----------

`"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips,
R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006.

Notes
-----

For an example of using this dataset, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
"""

# Authors: Peter Prettenhofer <[email protected]>
#          Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause

import logging
from io import BytesIO
from os import PathLike, makedirs, remove
from os.path import exists

import joblib
import numpy as np

from ..utils import Bunch
from ..utils._param_validation import validate_params
from . import get_data_home
from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath

# The original data can be found at:
# https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip
SAMPLES = RemoteFileMetadata(
    filename="samples.zip",
    url="https://ndownloader.figshare.com/files/5976075",
    checksum="abb07ad284ac50d9e6d20f1c4211e0fd3c098f7f85955e89d321ee8efe37ac28",
)

# The original data can be found at:
# https://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip
COVERAGES = RemoteFileMetadata(
    filename="coverages.zip",
    url="https://ndownloader.figshare.com/files/5976078",
    checksum="4d862674d72e79d6cee77e63b98651ec7926043ba7d39dcb31329cf3f6073807",
)

DATA_ARCHIVE_NAME = "species_coverage.pkz"


logger = logging.getLogger(__name__)


def _load_coverage(F, header_length=6, dtype=np.int16):
    """Load a coverage file from an open file object.

    This will return a numpy array of the given dtype
    """
    header = [F.readline() for _ in range(header_length)]
    make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
    header = dict([make_tuple(line) for line in header])

    M = np.loadtxt(F, dtype=dtype)
    nodata = int(header[b"NODATA_value"])
    if nodata != -9999:
        M[nodata] = -9999
    return M


def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data
    """
    names = F.readline().decode("ascii").strip().split(",")

    rec = np.loadtxt(F, skiprows=0, delimiter=",", dtype="S22,f4,f4")
    rec.dtype.names = names
    return rec


def construct_grids(batch):
    """Construct the map grid from the batch object

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages
    """
    # x,y coordinates for corner cells
    xmin = batch.x_left_lower_corner + batch.grid_size
    xmax = xmin + (batch.Nx * batch.grid_size)
    ymin = batch.y_left_lower_corner + batch.grid_size
    ymax = ymin + (batch.Ny * batch.grid_size)

    # x coordinates of the grid cells
    xgrid = np.arange(xmin, xmax, batch.grid_size)
    # y coordinates of the grid cells
    ygrid = np.arange(ymin, ymax, batch.grid_size)

    return (xgrid, ygrid)

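# Illustrative sketch, not part of the upstream file: construct_grids only
# reads the grid metadata fields of the Bunch, so a tiny stand-in object is
# enough to see how the axes are built. The numbers below are made up.
# >>> from sklearn.utils import Bunch
# >>> fake = Bunch(x_left_lower_corner=0.0, y_left_lower_corner=0.0,
# ...              Nx=4, Ny=3, grid_size=0.5)
# >>> xgrid, ygrid = construct_grids(fake)
# >>> xgrid
# array([0.5, 1. , 1.5, 2. ])
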
@validate_params(
    {"data_home": [str, PathLike, None], "download_if_missing": ["boolean"]},
    prefer_skip_nested_validation=True,
)
def fetch_species_distributions(*, data_home=None, download_if_missing=True):
    """Loader for species distribution dataset from Phillips et al. (2006).

    Read more in the :ref:`User Guide <species_distribution_dataset>`.

    Parameters
    ----------
    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        coverages : array, shape = [14, 1592, 1212]
            These represent the 14 features measured
            at each point of the map grid.
            The latitude/longitude values for the grid are discussed below.
            Missing data is represented by the value -9999.
        train : record array, shape = (1624,)
            The training points for the data. Each point has three fields:

            - train['species'] is the species name
            - train['dd long'] is the longitude, in degrees
            - train['dd lat'] is the latitude, in degrees
        test : record array, shape = (620,)
            The test points for the data. Same format as the training data.
        Nx, Ny : integers
            The number of longitudes (x) and latitudes (y) in the grid
        x_left_lower_corner, y_left_lower_corner : floats
            The (x,y) position of the lower-left corner, in degrees
        grid_size : float
            The spacing between points of the grid, in degrees

    Notes
    -----

    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).

    The two species are:

    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/details/3038/0>`_ ,
      the Brown-throated Sloth.

    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.

    - For an example of using this dataset with scikit-learn, see
      :ref:`examples/applications/plot_species_distribution_modeling.py
      <sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.

    References
    ----------

    * `"Maximum entropy modeling of species geographic distributions"
      <http://rob.schapire.net/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    Examples
    --------
    >>> from sklearn.datasets import fetch_species_distributions
    >>> species = fetch_species_distributions()
    >>> species.train[:5]
    array([(b'microryzomys_minutus', -64.7   , -17.85  ),
           (b'microryzomys_minutus', -67.8333, -16.3333),
           (b'microryzomys_minutus', -67.8833, -16.3   ),
           (b'microryzomys_minutus', -67.8   , -16.2667),
           (b'microryzomys_minutus', -67.9833, -15.9   )],
          dtype=[('species', 'S22'), ('dd long', '<f4'), ('dd lat', '<f4')])
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)

    # Define parameters for the data files. These should not be changed
    # unless the data model changes. They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(
        x_left_lower_corner=-94.8,
        Nx=1212,
        y_left_lower_corner=-56.05,
        Ny=1592,
        grid_size=0.05,
    )
    dtype = np.int16

    archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)

    if not exists(archive_path):
        if not download_if_missing:
            raise OSError("Data not found and `download_if_missing` is False")
        logger.info("Downloading species data from %s to %s" % (SAMPLES.url, data_home))
        samples_path = _fetch_remote(SAMPLES, dirname=data_home)
        with np.load(samples_path) as X:  # samples.zip is a valid npz
            for f in X.files:
                fhandle = BytesIO(X[f])
                if "train" in f:
                    train = _load_csv(fhandle)
                if "test" in f:
                    test = _load_csv(fhandle)
        remove(samples_path)

        logger.info(
            "Downloading coverage data from %s to %s" % (COVERAGES.url, data_home)
        )
        coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
        with np.load(coverages_path) as X:  # coverages.zip is a valid npz
            coverages = []
            for f in X.files:
                fhandle = BytesIO(X[f])
                logger.debug(" - converting {}".format(f))
                coverages.append(_load_coverage(fhandle))
            coverages = np.asarray(coverages, dtype=dtype)
        remove(coverages_path)

        bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
        joblib.dump(bunch, archive_path, compress=9)
    else:
        bunch = joblib.load(archive_path)

    return bunch
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_fast.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (590 kB). View file
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_svmlight_format_io.py
ADDED
@@ -0,0 +1,584 @@
"""This module implements a loader and dumper for the svmlight format

This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse datasets.

The first element of each line can be used to store a target variable to
predict.

This format is used as the default format for both svmlight and the
libsvm command line programs.
"""

# Authors: Mathieu Blondel <[email protected]>
#          Lars Buitinck
#          Olivier Grisel <[email protected]>
# License: BSD 3 clause

import os.path
from contextlib import closing
from numbers import Integral

import numpy as np
import scipy.sparse as sp

from .. import __version__
from ..utils import IS_PYPY, check_array
from ..utils._param_validation import HasMethods, Interval, StrOptions, validate_params

if not IS_PYPY:
    from ._svmlight_format_fast import (
        _dump_svmlight_file,
        _load_svmlight_file,
    )
else:

    def _load_svmlight_file(*args, **kwargs):
        raise NotImplementedError(
            "load_svmlight_file is currently not "
            "compatible with PyPy (see "
            "https://github.com/scikit-learn/scikit-learn/issues/11543 "
            "for the status updates)."
        )


@validate_params(
    {
        "f": [
            str,
            Interval(Integral, 0, None, closed="left"),
            os.PathLike,
            HasMethods("read"),
        ],
        "n_features": [Interval(Integral, 1, None, closed="left"), None],
        "dtype": "no_validation",  # delegate validation to numpy
        "multilabel": ["boolean"],
        "zero_based": ["boolean", StrOptions({"auto"})],
        "query_id": ["boolean"],
        "offset": [Interval(Integral, 0, None, closed="left")],
        "length": [Integral],
    },
    prefer_skip_nested_validation=True,
)
def load_svmlight_file(
    f,
    *,
    n_features=None,
    dtype=np.float64,
    multilabel=False,
    zero_based="auto",
    query_id=False,
    offset=0,
    length=-1,
):
    """Load datasets in the svmlight / libsvm format into sparse CSR matrix.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse datasets.

    The first element of each line can be used to store a target variable
    to predict.

    This format is used as the default format for both svmlight and the
    libsvm command line programs.

    Parsing a text based source can be expensive. When repeatedly
    working on the same dataset, it is recommended to wrap this
    loader with joblib.Memory.cache to store a memmapped backup of the
    CSR results of the first call and benefit from the near instantaneous
    loading of memmapped structures for the subsequent calls.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    This implementation is written in Cython and is reasonably fast.
    However, a faster API-compatible loader is also available at:

      https://github.com/mblondel/svmlight-loader

    Parameters
    ----------
    f : str, path-like, file-like or int
        (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. A file-like or file descriptor will not be closed
        by this function. A file-like object must be opened in binary mode.

        .. versionchanged:: 1.2
           Path-like objects are now accepted.

    n_features : int, default=None
        The number of features to use. If None, it will be inferred. This
        argument is useful to load several files that are subsets of a
        bigger sliced dataset: each subset might not have examples of
        every feature, hence the inferred shape might vary from one
        slice to another.
        n_features is only required if ``offset`` or ``length`` are passed a
        non-default value.

    dtype : numpy data type, default=np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    multilabel : bool, default=False
        Samples may have several labels each (see
        https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

    zero_based : bool or "auto", default="auto"
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe when no ``offset`` or ``length`` is passed.
        If ``offset`` or ``length`` are passed, the "auto" mode falls back
        to ``zero_based=True`` to avoid having the heuristic check yield
        inconsistent results on different segments of the file.

    query_id : bool, default=False
        If True, will return the query_id array for each file.

    offset : int, default=0
        Ignore the offset first bytes by seeking forward, then
        discarding the following bytes up until the next new line
        character.

    length : int, default=-1
        If strictly positive, stop reading any new line of data once the
        position in the file has reached the (offset + length) bytes threshold.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)
        The data matrix.

    y : ndarray of shape (n_samples,), or a list of tuples of length n_samples
        The target. It is a list of tuples when ``multilabel=True``, else an
        ndarray.

    query_id : array of shape (n_samples,)
        The query_id for each sample. Only returned when query_id is set to
        True.

    See Also
    --------
    load_svmlight_files : Similar function for loading multiple files in this
        format, enforcing the same number of features/columns on all of them.

    Examples
    --------
    To use joblib.Memory to cache the svmlight file::

        from joblib import Memory
|
179 |
+
from .datasets import load_svmlight_file
|
180 |
+
mem = Memory("./mycache")
|
181 |
+
|
182 |
+
@mem.cache
|
183 |
+
def get_data():
|
184 |
+
data = load_svmlight_file("mysvmlightfile")
|
185 |
+
return data[0], data[1]
|
186 |
+
|
187 |
+
X, y = get_data()
|
188 |
+
"""
|
189 |
+
return tuple(
|
190 |
+
load_svmlight_files(
|
191 |
+
[f],
|
192 |
+
n_features=n_features,
|
193 |
+
dtype=dtype,
|
194 |
+
multilabel=multilabel,
|
195 |
+
zero_based=zero_based,
|
196 |
+
query_id=query_id,
|
197 |
+
offset=offset,
|
198 |
+
length=length,
|
199 |
+
)
|
200 |
+
)
|
201 |
+
|
202 |
+
|
203 |
+
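

# --- Editor's note: an illustrative usage sketch (not part of the upstream
# module). Assuming placeholder files "train.svm" (plain svmlight format) and
# "rank.svm" (svmlight format with qid annotations) exist on disk:
#
#     X, y = load_svmlight_file("train.svm")
#     # X is a scipy.sparse CSR matrix, y is a 1-D numpy array
#
#     X_r, y_r, qid = load_svmlight_file("rank.svm", query_id=True)
#     # qid groups samples for pairwise learning-to-rank losses
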
def _gen_open(f):
    if isinstance(f, int):  # file descriptor
        return open(f, "rb", closefd=False)
    elif isinstance(f, os.PathLike):
        f = os.fspath(f)
    elif not isinstance(f, str):
        raise TypeError("expected {str, int, path-like, file-like}, got %s" % type(f))

    _, ext = os.path.splitext(f)
    if ext == ".gz":
        import gzip

        return gzip.open(f, "rb")
    elif ext == ".bz2":
        from bz2 import BZ2File

        return BZ2File(f, "rb")
    else:
        return open(f, "rb")

def _open_and_load(f, dtype, multilabel, zero_based, query_id, offset=0, length=-1):
    if hasattr(f, "read"):
        actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file(
            f, dtype, multilabel, zero_based, query_id, offset, length
        )
    else:
        with closing(_gen_open(f)) as f:
            actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file(
                f, dtype, multilabel, zero_based, query_id, offset, length
            )

    # convert from array.array, give data the right dtype
    if not multilabel:
        labels = np.frombuffer(labels, np.float64)
    data = np.frombuffer(data, actual_dtype)
    indices = np.frombuffer(ind, np.longlong)
    indptr = np.frombuffer(indptr, dtype=np.longlong)  # never empty
    query = np.frombuffer(query, np.int64)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query

@validate_params(
    {
        "files": [
            "array-like",
            str,
            os.PathLike,
            HasMethods("read"),
            Interval(Integral, 0, None, closed="left"),
        ],
        "n_features": [Interval(Integral, 1, None, closed="left"), None],
        "dtype": "no_validation",  # delegate validation to numpy
        "multilabel": ["boolean"],
        "zero_based": ["boolean", StrOptions({"auto"})],
        "query_id": ["boolean"],
        "offset": [Interval(Integral, 0, None, closed="left")],
        "length": [Integral],
    },
    prefer_skip_nested_validation=True,
)
def load_svmlight_files(
    files,
    *,
    n_features=None,
    dtype=np.float64,
    multilabel=False,
    zero_based="auto",
    query_id=False,
    offset=0,
    length=-1,
):
    """Load dataset from multiple files in SVMlight format.

    This function is equivalent to mapping load_svmlight_file over a list of
    files, except that the results are concatenated into a single, flat list
    and the sample vectors are constrained to all have the same number of
    features.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : array-like, dtype=str, path-like, file-like or int
        (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. File-likes and file descriptors will not be
        closed by this function. File-like objects must be opened in binary
        mode.

        .. versionchanged:: 1.2
           Path-like objects are now accepted.

    n_features : int, default=None
        The number of features to use. If None, it will be inferred from the
        maximum column index occurring in any of the files.

        This can be set to a higher value than the actual number of features
        in any of the input files, but setting it to a lower value will cause
        an exception to be raised.

    dtype : numpy data type, default=np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    multilabel : bool, default=False
        Samples may have several labels each (see
        https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

    zero_based : bool or "auto", default="auto"
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe when no offset or length is passed.
        If offset or length are passed, the "auto" mode falls back
        to zero_based=True to avoid having the heuristic check yield
        inconsistent results on different segments of the file.

    query_id : bool, default=False
        If True, will return the query_id array for each file.

    offset : int, default=0
        Ignore the offset first bytes by seeking forward, then
        discarding the following bytes up until the next new line
        character.

    length : int, default=-1
        If strictly positive, stop reading any new line of data once the
        position in the file has reached the (offset + length) bytes threshold.

    Returns
    -------
    [X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays
        Each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
        If query_id is set to True, this will return instead (Xi, yi, qi)
        triplets.

    See Also
    --------
    load_svmlight_file: Similar function for loading a single file in this
        format.

    Notes
    -----
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may not
    be the case if you load the files individually with load_svmlight_file.
    """
    if (offset != 0 or length > 0) and zero_based == "auto":
        # disable heuristic search to avoid getting inconsistent results on
        # different segments of the file
        zero_based = True

    if (offset != 0 or length > 0) and n_features is None:
        raise ValueError("n_features is required when offset or length is specified.")

    r = [
        _open_and_load(
            f,
            dtype,
            multilabel,
            bool(zero_based),
            bool(query_id),
            offset=offset,
            length=length,
        )
        for f in files
    ]

    if (
        zero_based is False
        or zero_based == "auto"
        and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r)
    ):
        for _, indices, _, _, _ in r:
            indices -= 1

    n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1

    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError(
            "n_features was set to {}, but input file contains {} features".format(
                n_features, n_f
            )
        )

    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result
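

# --- Editor's note: an illustrative sketch (not part of the upstream module).
# The Notes section above explains why train and test files should be loaded
# together; assuming placeholder files "train.svm" and "test.svm" exist:
#
#     X_train, y_train, X_test, y_test = load_svmlight_files(
#         ["train.svm", "test.svm"]
#     )
#     assert X_train.shape[1] == X_test.shape[1]  # enforced by this loader
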
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    if comment:
        f.write(
            (
                "# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__
            ).encode()
        )
        f.write(
            ("# Column indices are %s-based\n" % ["zero", "one"][one_based]).encode()
        )

        f.write(b"#\n")
        f.writelines(b"# %s\n" % line for line in comment.splitlines())
    X_is_sp = sp.issparse(X)
    y_is_sp = sp.issparse(y)
    if not multilabel and not y_is_sp:
        y = y[:, np.newaxis]
    _dump_svmlight_file(
        X,
        y,
        f,
        multilabel,
        one_based,
        query_id,
        X_is_sp,
        y_is_sp,
    )

|
445 |
+
{
|
446 |
+
"X": ["array-like", "sparse matrix"],
|
447 |
+
"y": ["array-like", "sparse matrix"],
|
448 |
+
"f": [str, HasMethods(["write"])],
|
449 |
+
"zero_based": ["boolean"],
|
450 |
+
"comment": [str, bytes, None],
|
451 |
+
"query_id": ["array-like", None],
|
452 |
+
"multilabel": ["boolean"],
|
453 |
+
},
|
454 |
+
prefer_skip_nested_validation=True,
|
455 |
+
)
|
456 |
+
def dump_svmlight_file(
|
457 |
+
X,
|
458 |
+
y,
|
459 |
+
f,
|
460 |
+
*,
|
461 |
+
zero_based=True,
|
462 |
+
comment=None,
|
463 |
+
query_id=None,
|
464 |
+
multilabel=False,
|
465 |
+
):
|
466 |
+
"""Dump the dataset in svmlight / libsvm file format.
|
467 |
+
|
468 |
+
This format is a text-based format, with one sample per line. It does
|
469 |
+
not store zero valued features hence is suitable for sparse dataset.
|
470 |
+
|
471 |
+
The first element of each line can be used to store a target variable
|
472 |
+
to predict.
|
473 |
+
|
474 |
+
Parameters
|
475 |
+
----------
|
476 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
477 |
+
Training vectors, where `n_samples` is the number of samples and
|
478 |
+
`n_features` is the number of features.
|
479 |
+
|
480 |
+
y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels)
|
481 |
+
Target values. Class labels must be an
|
482 |
+
integer or float, or array-like objects of integer or float for
|
483 |
+
multilabel classifications.
|
484 |
+
|
485 |
+
f : str or file-like in binary mode
|
486 |
+
If string, specifies the path that will contain the data.
|
487 |
+
If file-like, data will be written to f. f should be opened in binary
|
488 |
+
mode.
|
489 |
+
|
490 |
+
zero_based : bool, default=True
|
491 |
+
Whether column indices should be written zero-based (True) or one-based
|
492 |
+
(False).
|
493 |
+
|
494 |
+
comment : str or bytes, default=None
|
495 |
+
Comment to insert at the top of the file. This should be either a
|
496 |
+
Unicode string, which will be encoded as UTF-8, or an ASCII byte
|
497 |
+
string.
|
498 |
+
If a comment is given, then it will be preceded by one that identifies
|
499 |
+
the file as having been dumped by scikit-learn. Note that not all
|
500 |
+
tools grok comments in SVMlight files.
|
501 |
+
|
502 |
+
query_id : array-like of shape (n_samples,), default=None
|
503 |
+
Array containing pairwise preference constraints (qid in svmlight
|
504 |
+
format).
|
505 |
+
|
506 |
+
multilabel : bool, default=False
|
507 |
+
Samples may have several labels each (see
|
508 |
+
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
|
509 |
+
|
510 |
+
.. versionadded:: 0.17
|
511 |
+
parameter `multilabel` to support multilabel datasets.
|
512 |
+
|
513 |
+
Examples
|
514 |
+
--------
|
515 |
+
>>> from sklearn.datasets import dump_svmlight_file, make_classification
|
516 |
+
>>> X, y = make_classification(random_state=0)
|
517 |
+
>>> output_file = "my_dataset.svmlight"
|
518 |
+
>>> dump_svmlight_file(X, y, output_file) # doctest: +SKIP
|
519 |
+
"""
|
520 |
+
if comment is not None:
|
521 |
+
# Convert comment string to list of lines in UTF-8.
|
522 |
+
# If a byte string is passed, then check whether it's ASCII;
|
523 |
+
# if a user wants to get fancy, they'll have to decode themselves.
|
524 |
+
if isinstance(comment, bytes):
|
525 |
+
comment.decode("ascii") # just for the exception
|
526 |
+
else:
|
527 |
+
comment = comment.encode("utf-8")
|
528 |
+
if b"\0" in comment:
|
529 |
+
raise ValueError("comment string contains NUL byte")
|
530 |
+
|
531 |
+
yval = check_array(y, accept_sparse="csr", ensure_2d=False)
|
532 |
+
if sp.issparse(yval):
|
533 |
+
if yval.shape[1] != 1 and not multilabel:
|
534 |
+
raise ValueError(
|
535 |
+
"expected y of shape (n_samples, 1), got %r" % (yval.shape,)
|
536 |
+
)
|
537 |
+
else:
|
538 |
+
if yval.ndim != 1 and not multilabel:
|
539 |
+
raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,))
|
540 |
+
|
541 |
+
Xval = check_array(X, accept_sparse="csr")
|
542 |
+
if Xval.shape[0] != yval.shape[0]:
|
543 |
+
raise ValueError(
|
544 |
+
"X.shape[0] and y.shape[0] should be the same, got %r and %r instead."
|
545 |
+
% (Xval.shape[0], yval.shape[0])
|
546 |
+
)
|
547 |
+
|
548 |
+
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
|
549 |
+
# so sort them here, but first make sure we don't modify the user's X.
|
550 |
+
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
|
551 |
+
if yval is y and hasattr(yval, "sorted_indices"):
|
552 |
+
y = yval.sorted_indices()
|
553 |
+
else:
|
554 |
+
y = yval
|
555 |
+
if hasattr(y, "sort_indices"):
|
556 |
+
y.sort_indices()
|
557 |
+
|
558 |
+
if Xval is X and hasattr(Xval, "sorted_indices"):
|
559 |
+
X = Xval.sorted_indices()
|
560 |
+
else:
|
561 |
+
X = Xval
|
562 |
+
if hasattr(X, "sort_indices"):
|
563 |
+
X.sort_indices()
|
564 |
+
|
565 |
+
if query_id is None:
|
566 |
+
# NOTE: query_id is passed to Cython functions using a fused type on query_id.
|
567 |
+
# Yet as of Cython>=3.0, memory views can't be None otherwise the runtime
|
568 |
+
# would not known which concrete implementation to dispatch the Python call to.
|
569 |
+
# TODO: simplify interfaces and implementations in _svmlight_format_fast.pyx.
|
570 |
+
query_id = np.array([], dtype=np.int32)
|
571 |
+
else:
|
572 |
+
query_id = np.asarray(query_id)
|
573 |
+
if query_id.shape[0] != y.shape[0]:
|
574 |
+
raise ValueError(
|
575 |
+
"expected query_id of shape (n_samples,), got %r" % (query_id.shape,)
|
576 |
+
)
|
577 |
+
|
578 |
+
one_based = not zero_based
|
579 |
+
|
580 |
+
if hasattr(f, "write"):
|
581 |
+
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
|
582 |
+
else:
|
583 |
+
with open(f, "wb") as f:
|
584 |
+
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
|
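

# --- Editor's note: an illustrative round-trip sketch (not part of the
# upstream module); "tmp.svm" is a placeholder path:
#
#     import numpy as np
#     from sklearn.datasets import dump_svmlight_file, load_svmlight_file
#
#     X = np.array([[0.0, 1.5], [2.0, 0.0]])
#     y = np.array([0, 1])
#     dump_svmlight_file(X, y, "tmp.svm", zero_based=True)
#     X2, y2 = load_svmlight_file("tmp.svm", zero_based=True)
#     assert np.allclose(X2.toarray(), X) and np.allclose(y2, y)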
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py
ADDED
@@ -0,0 +1,561 @@
"""Caching loader for the 20 newsgroups text classification dataset.


The description of the dataset is available on the official website at:

    http://people.csail.mit.edu/jrennie/20Newsgroups/

Quoting the introduction:

    The 20 Newsgroups data set is a collection of approximately 20,000
    newsgroup documents, partitioned (nearly) evenly across 20 different
    newsgroups. To the best of my knowledge, it was originally collected
    by Ken Lang, probably for his Newsweeder: Learning to filter netnews
    paper, though he does not explicitly mention this collection. The 20
    newsgroups collection has become a popular data set for experiments
    in text applications of machine learning techniques, such as text
    classification and text clustering.

This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and test
sets. The compressed dataset is around 14 MB. Once uncompressed, the train
set is 52 MB and the test set is 34 MB.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause

import codecs
import logging
import os
import pickle
import re
import shutil
import tarfile
from contextlib import suppress

import joblib
import numpy as np
import scipy.sparse as sp

from .. import preprocessing
from ..feature_extraction.text import CountVectorizer
from ..utils import Bunch, check_random_state
from ..utils._param_validation import StrOptions, validate_params
from . import get_data_home, load_files
from ._base import (
    RemoteFileMetadata,
    _convert_data_dataframe,
    _fetch_remote,
    _pkl_filepath,
    load_descr,
)

logger = logging.getLogger(__name__)

# The original data can be found at:
# https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz
ARCHIVE = RemoteFileMetadata(
    filename="20news-bydate.tar.gz",
    url="https://ndownloader.figshare.com/files/5975967",
    checksum="8f1b2514ca22a5ade8fbb9cfa5727df95fa587f4c87b786e15c759fa66d95610",
)

CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"

def _download_20newsgroups(target_dir, cache_path):
    """Download the 20 newsgroups data and store it as a zipped pickle."""
    train_path = os.path.join(target_dir, TRAIN_FOLDER)
    test_path = os.path.join(target_dir, TEST_FOLDER)

    os.makedirs(target_dir, exist_ok=True)

    logger.info("Downloading dataset from %s (14 MB)", ARCHIVE.url)
    archive_path = _fetch_remote(ARCHIVE, dirname=target_dir)

    logger.debug("Decompressing %s", archive_path)
    tarfile.open(archive_path, "r:gz").extractall(path=target_dir)

    with suppress(FileNotFoundError):
        os.remove(archive_path)

    # Store a zipped pickle
    cache = dict(
        train=load_files(train_path, encoding="latin1"),
        test=load_files(test_path, encoding="latin1"),
    )
    compressed_content = codecs.encode(pickle.dumps(cache), "zlib_codec")
    with open(cache_path, "wb") as f:
        f.write(compressed_content)

    shutil.rmtree(target_dir)
    return cache

def strip_newsgroup_header(text):
    """
    Given text in "news" format, strip the headers, by removing everything
    before the first blank line.

    Parameters
    ----------
    text : str
        The text from which to remove the headers.
    """
    _before, _blankline, after = text.partition("\n\n")
    return after


_QUOTE_RE = re.compile(
    r"(writes in|writes:|wrote:|says:|said:" r"|^In article|^Quoted from|^\||^>)"
)


def strip_newsgroup_quoting(text):
    """
    Given text in "news" format, strip lines beginning with the quote
    characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').

    Parameters
    ----------
    text : str
        The text from which to remove quoted lines.
    """
    good_lines = [line for line in text.split("\n") if not _QUOTE_RE.search(line)]
    return "\n".join(good_lines)


def strip_newsgroup_footer(text):
    """
    Given text in "news" format, attempt to remove a signature block.

    As a rough heuristic, we assume that signatures are set apart by either
    a blank line or a line made of hyphens, and that it is the last such line
    in the file (disregarding blank lines at the end).

    Parameters
    ----------
    text : str
        The text from which to remove the signature block.
    """
    lines = text.strip().split("\n")
    for line_num in range(len(lines) - 1, -1, -1):
        line = lines[line_num]
        if line.strip().strip("-") == "":
            break

    if line_num > 0:
        return "\n".join(lines[:line_num])
    else:
        return text
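

# --- Editor's note: an illustrative sketch (not part of the upstream module).
# The three strip_* helpers above implement the ``remove`` options of
# fetch_20newsgroups; on a toy post:
#
#     post = "Subject: hi\n\nsomeone writes:\n> quoted line\nactual reply\n"
#     body = strip_newsgroup_header(post)   # drops everything before "\n\n"
#     body = strip_newsgroup_quoting(body)  # drops the "writes:" and "> " lines
#     # body is now "actual reply\n"
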
@validate_params(
    {
        "data_home": [str, os.PathLike, None],
        "subset": [StrOptions({"train", "test", "all"})],
        "categories": ["array-like", None],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
        "remove": [tuple],
        "download_if_missing": ["boolean"],
        "return_X_y": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_20newsgroups(
    *,
    data_home=None,
    subset="train",
    categories=None,
    shuffle=True,
    random_state=42,
    remove=(),
    download_if_missing=True,
    return_X_y=False,
):
    """Load the filenames and data from the 20 newsgroups dataset \
(classification).

    Download it if necessary.

    =================   ==========
    Classes                     20
    Samples total            18846
    Dimensionality               1
    Features                  text
    =================   ==========

    Read more in the :ref:`User Guide <20newsgroups_dataset>`.

    Parameters
    ----------
    data_home : str or path-like, default=None
        Specify a download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    subset : {'train', 'test', 'all'}, default='train'
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.

    categories : array-like, dtype=str, default=None
        If None (default), load all the categories.
        If not None, list of category names to load (other categories
        ignored).

    shuffle : bool, default=True
        Whether or not to shuffle the data: might be important for models that
        make the assumption that the samples are independent and identically
        distributed (i.i.d.), such as stochastic gradient descent.

    random_state : int, RandomState instance or None, default=42
        Determines random number generation for dataset shuffling. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    remove : tuple, default=()
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.

        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.

        'headers' follows an exact standard; the other filters are not always
        correct.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : bool, default=False
        If True, returns `(data.data, data.target)` instead of a Bunch
        object.

        .. versionadded:: 0.22

    Returns
    -------
    bunch : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : list of shape (n_samples,)
            The data list to learn.
        target: ndarray of shape (n_samples,)
            The target labels.
        filenames: list of shape (n_samples,)
            The path to the location of the data.
        DESCR: str
            The full description of the dataset.
        target_names: list of shape (n_classes,)
            The names of target classes.

    (data, target) : tuple if `return_X_y=True`
        A tuple of two ndarrays. The first contains a 2D array of shape
        (n_samples, n_classes) with each row representing one sample and each
        column representing the features. The second array of shape
        (n_samples,) contains the target samples.

        .. versionadded:: 0.22
    """

    data_home = get_data_home(data_home=data_home)
    cache_path = _pkl_filepath(data_home, CACHE_NAME)
    twenty_home = os.path.join(data_home, "20news_home")
    cache = None
    if os.path.exists(cache_path):
        try:
            with open(cache_path, "rb") as f:
                compressed_content = f.read()
            uncompressed_content = codecs.decode(compressed_content, "zlib_codec")
            cache = pickle.loads(uncompressed_content)
        except Exception as e:
            print(80 * "_")
            print("Cache loading failed")
            print(80 * "_")
            print(e)

    if cache is None:
        if download_if_missing:
            logger.info("Downloading 20news dataset. This may take a few minutes.")
            cache = _download_20newsgroups(
                target_dir=twenty_home, cache_path=cache_path
            )
        else:
            raise OSError("20Newsgroups dataset not found")

    if subset in ("train", "test"):
        data = cache[subset]
    elif subset == "all":
        data_lst = list()
        target = list()
        filenames = list()
        for subset in ("train", "test"):
            data = cache[subset]
            data_lst.extend(data.data)
            target.extend(data.target)
            filenames.extend(data.filenames)

        data.data = data_lst
        data.target = np.array(target)
        data.filenames = np.array(filenames)

    fdescr = load_descr("twenty_newsgroups.rst")

    data.DESCR = fdescr

    if "headers" in remove:
        data.data = [strip_newsgroup_header(text) for text in data.data]
    if "footers" in remove:
        data.data = [strip_newsgroup_footer(text) for text in data.data]
    if "quotes" in remove:
        data.data = [strip_newsgroup_quoting(text) for text in data.data]

    if categories is not None:
        labels = [(data.target_names.index(cat), cat) for cat in categories]
        # Sort the categories to have the ordering of the labels
        labels.sort()
        labels, categories = zip(*labels)
        mask = np.isin(data.target, labels)
        data.filenames = data.filenames[mask]
        data.target = data.target[mask]
        # searchsorted to have continuous labels
        data.target = np.searchsorted(labels, data.target)
        data.target_names = list(categories)
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[mask]
        data.data = data_lst.tolist()

    if shuffle:
        random_state = check_random_state(random_state)
        indices = np.arange(data.target.shape[0])
        random_state.shuffle(indices)
        data.filenames = data.filenames[indices]
        data.target = data.target[indices]
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[indices]
        data.data = data_lst.tolist()

    if return_X_y:
        return data.data, data.target

    return data
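

# --- Editor's note: an illustrative sketch (not part of the upstream module).
# Typical use: fetch two categories with the metadata stripped so that a
# classifier cannot overfit on headers, signatures or quoted replies:
#
#     from sklearn.datasets import fetch_20newsgroups
#
#     bunch = fetch_20newsgroups(
#         subset="train",
#         categories=["sci.space", "rec.autos"],
#         remove=("headers", "footers", "quotes"),
#     )
#     # bunch.data is a list of raw posts, bunch.target an array of labels
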
@validate_params(
    {
        "subset": [StrOptions({"train", "test", "all"})],
        "remove": [tuple],
        "data_home": [str, os.PathLike, None],
        "download_if_missing": ["boolean"],
        "return_X_y": ["boolean"],
        "normalize": ["boolean"],
        "as_frame": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_20newsgroups_vectorized(
    *,
    subset="train",
    remove=(),
    data_home=None,
    download_if_missing=True,
    return_X_y=False,
    normalize=True,
    as_frame=False,
):
    """Load and vectorize the 20 newsgroups dataset (classification).

    Download it if necessary.

    This is a convenience function; the transformation is done using the
    default settings for
    :class:`~sklearn.feature_extraction.text.CountVectorizer`. For more
    advanced usage (stopword filtering, n-gram extraction, etc.), combine
    fetch_20newsgroups with a custom
    :class:`~sklearn.feature_extraction.text.CountVectorizer`,
    :class:`~sklearn.feature_extraction.text.HashingVectorizer`,
    :class:`~sklearn.feature_extraction.text.TfidfTransformer` or
    :class:`~sklearn.feature_extraction.text.TfidfVectorizer`.

    The resulting counts are normalized using
    :func:`sklearn.preprocessing.normalize` unless normalize is set to False.

    =================   ==========
    Classes                     20
    Samples total            18846
    Dimensionality          130107
    Features                  real
    =================   ==========

    Read more in the :ref:`User Guide <20newsgroups_dataset>`.

    Parameters
    ----------
    subset : {'train', 'test', 'all'}, default='train'
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.

    remove : tuple, default=()
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.

        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.

    data_home : str or path-like, default=None
        Specify a download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : bool, default=False
        If True, returns ``(data.data, data.target)`` instead of a Bunch
        object.

        .. versionadded:: 0.20

    normalize : bool, default=True
        If True, normalizes each document's feature vector to unit norm using
        :func:`sklearn.preprocessing.normalize`.

        .. versionadded:: 0.22

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric, string, or categorical). The target is
        a pandas DataFrame or Series depending on the number of
        `target_columns`.

        .. versionadded:: 0.24

    Returns
    -------
    bunch : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data: {sparse matrix, dataframe} of shape (n_samples, n_features)
            The input data matrix. If ``as_frame`` is `True`, ``data`` is
            a pandas DataFrame with sparse columns.
        target: {ndarray, series} of shape (n_samples,)
            The target labels. If ``as_frame`` is `True`, ``target`` is a
            pandas Series.
        target_names: list of shape (n_classes,)
            The names of target classes.
        DESCR: str
            The full description of the dataset.
        frame: dataframe of shape (n_samples, n_features + 1)
            Only present when `as_frame=True`. Pandas DataFrame with ``data``
            and ``target``.

            .. versionadded:: 0.24

    (data, target) : tuple if ``return_X_y`` is True
        `data` and `target` would be of the format defined in the `Bunch`
        description above.

        .. versionadded:: 0.20
    """
    data_home = get_data_home(data_home=data_home)
    filebase = "20newsgroup_vectorized"
    if remove:
        filebase += "remove-" + "-".join(remove)
    target_file = _pkl_filepath(data_home, filebase + ".pkl")

    # we shuffle but use a fixed seed for the memoization
    data_train = fetch_20newsgroups(
        data_home=data_home,
        subset="train",
        categories=None,
        shuffle=True,
        random_state=12,
        remove=remove,
        download_if_missing=download_if_missing,
    )

    data_test = fetch_20newsgroups(
        data_home=data_home,
        subset="test",
        categories=None,
        shuffle=True,
        random_state=12,
        remove=remove,
        download_if_missing=download_if_missing,
    )

    if os.path.exists(target_file):
        try:
            X_train, X_test, feature_names = joblib.load(target_file)
        except ValueError as e:
            raise ValueError(
                f"The cached dataset located in {target_file} was fetched "
                "with an older scikit-learn version and it is not compatible "
                "with the scikit-learn version imported. You need to "
                f"manually delete the file: {target_file}."
            ) from e
    else:
        vectorizer = CountVectorizer(dtype=np.int16)
        X_train = vectorizer.fit_transform(data_train.data).tocsr()
        X_test = vectorizer.transform(data_test.data).tocsr()
        feature_names = vectorizer.get_feature_names_out()

        joblib.dump((X_train, X_test, feature_names), target_file, compress=9)

    # the data is stored as int16 for compactness
    # but normalize needs floats
    if normalize:
        X_train = X_train.astype(np.float64)
        X_test = X_test.astype(np.float64)
        preprocessing.normalize(X_train, copy=False)
        preprocessing.normalize(X_test, copy=False)

    target_names = data_train.target_names

    if subset == "train":
        data = X_train
        target = data_train.target
    elif subset == "test":
        data = X_test
        target = data_test.target
    elif subset == "all":
        data = sp.vstack((X_train, X_test)).tocsr()
        target = np.concatenate((data_train.target, data_test.target))

    fdescr = load_descr("twenty_newsgroups.rst")

    frame = None
    target_name = ["category_class"]

    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "fetch_20newsgroups_vectorized",
            data,
            target,
            feature_names,
            target_names=target_name,
            sparse_data=True,
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        feature_names=feature_names,
        DESCR=fdescr,
    )
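

# --- Editor's note: an illustrative sketch (not part of the upstream module).
# The vectorized fetcher returns token counts already turned into a sparse
# matrix (and L2-normalized by default):
#
#     from sklearn.datasets import fetch_20newsgroups_vectorized
#
#     X, y = fetch_20newsgroups_vectorized(subset="train", return_X_y=True)
#     # X is a scipy.sparse CSR matrix of shape (n_train_docs, 130107)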
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/breast_cancer.rst
ADDED
@@ -0,0 +1,122 @@
.. _breast_cancer_dataset:

Breast cancer wisconsin (diagnostic) dataset
--------------------------------------------

**Data Set Characteristics:**

:Number of Instances: 569

:Number of Attributes: 30 numeric, predictive attributes and the class

:Attribute Information:
    - radius (mean of distances from center to points on the perimeter)
    - texture (standard deviation of gray-scale values)
    - perimeter
    - area
    - smoothness (local variation in radius lengths)
    - compactness (perimeter^2 / area - 1.0)
    - concavity (severity of concave portions of the contour)
    - concave points (number of concave portions of the contour)
    - symmetry
    - fractal dimension ("coastline approximation" - 1)

    The mean, standard error, and "worst" or largest (mean of the three
    worst/largest values) of these features were computed for each image,
    resulting in 30 features. For instance, field 0 is Mean Radius, field
    10 is Radius SE, field 20 is Worst Radius.

    - class:
            - WDBC-Malignant
            - WDBC-Benign

:Summary Statistics:

    ===================================== ====== ======
                                             Min    Max
    ===================================== ====== ======
    radius (mean):                         6.981  28.11
    texture (mean):                         9.71  39.28
    perimeter (mean):                      43.79  188.5
    area (mean):                           143.5 2501.0
    smoothness (mean):                     0.053  0.163
    compactness (mean):                    0.019  0.345
    concavity (mean):                        0.0  0.427
    concave points (mean):                   0.0  0.201
    symmetry (mean):                       0.106  0.304
    fractal dimension (mean):               0.05  0.097
    radius (standard error):               0.112  2.873
    texture (standard error):               0.36  4.885
    perimeter (standard error):            0.757  21.98
    area (standard error):                 6.802  542.2
    smoothness (standard error):           0.002  0.031
    compactness (standard error):          0.002  0.135
    concavity (standard error):              0.0  0.396
    concave points (standard error):         0.0  0.053
    symmetry (standard error):             0.008  0.079
    fractal dimension (standard error):    0.001   0.03
    radius (worst):                         7.93  36.04
    texture (worst):                       12.02  49.54
    perimeter (worst):                     50.41  251.2
    area (worst):                          185.2 4254.0
    smoothness (worst):                    0.071  0.223
    compactness (worst):                   0.027  1.058
    concavity (worst):                       0.0  1.252
    concave points (worst):                  0.0  0.291
    symmetry (worst):                      0.156  0.664
    fractal dimension (worst):             0.055  0.208
    ===================================== ====== ======

:Missing Attribute Values: None

:Class Distribution: 212 - Malignant, 357 - Benign

:Creator: Dr. William H. Wolberg, W. Nick Street, Olvi L. Mangasarian

:Donor: Nick Street

:Date: November, 1995

This is a copy of UCI ML Breast Cancer Wisconsin (Diagnostic) datasets.
https://goo.gl/U2Uwz2

Features are computed from a digitized image of a fine needle
aspirate (FNA) of a breast mass. They describe
characteristics of the cell nuclei present in the image.

The separating plane described above was obtained using
Multisurface Method-Tree (MSM-T) [K. P. Bennett, "Decision Tree
Construction Via Linear Programming." Proceedings of the 4th
Midwest Artificial Intelligence and Cognitive Science Society,
pp. 97-101, 1992], a classification method which uses linear
programming to construct a decision tree. Relevant features
were selected using an exhaustive search in the space of 1-4
features and 1-3 separating planes.

The actual linear program used to obtain the separating plane
in the 3-dimensional space is that described in:
[K. P. Bennett and O. L. Mangasarian: "Robust Linear
Programming Discrimination of Two Linearly Inseparable Sets",
Optimization Methods and Software 1, 1992, 23-34].

This database is also available through the UW CS ftp server:

ftp ftp.cs.wisc.edu
cd math-prog/cpo-dataset/machine-learn/WDBC/

|details-start|
**References**
|details-split|

- W.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction
  for breast tumor diagnosis. IS&T/SPIE 1993 International Symposium on
  Electronic Imaging: Science and Technology, volume 1905, pages 861-870,
  San Jose, CA, 1993.
- O.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and
  prognosis via linear programming. Operations Research, 43(4), pages 570-577,
  July-August 1995.
- W.H. Wolberg, W.N. Street, and O.L. Mangasarian. Machine learning techniques
  to diagnose breast cancer from fine-needle aspirates. Cancer Letters 77 (1994)
  163-171.

|details-end|
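
A minimal loading sketch (editor's addition, not part of the original
description)::

    from sklearn.datasets import load_breast_cancer

    X, y = load_breast_cancer(return_X_y=True)
    # X.shape == (569, 30); y encodes 0 = malignant, 1 = benign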
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/california_housing.rst
ADDED
@@ -0,0 +1,46 @@
.. _california_housing_dataset:

California Housing dataset
--------------------------

**Data Set Characteristics:**

:Number of Instances: 20640

:Number of Attributes: 8 numeric, predictive attributes and the target

:Attribute Information:
    - MedInc        median income in block group
    - HouseAge      median house age in block group
    - AveRooms      average number of rooms per household
    - AveBedrms     average number of bedrooms per household
    - Population    block group population
    - AveOccup      average number of household members
    - Latitude      block group latitude
    - Longitude     block group longitude

:Missing Attribute Values: None

This dataset was obtained from the StatLib repository.
https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html

The target variable is the median house value for California districts,
expressed in hundreds of thousands of dollars ($100,000).

This dataset was derived from the 1990 U.S. census, using one row per census
block group. A block group is the smallest geographical unit for which the U.S.
Census Bureau publishes sample data (a block group typically has a population
of 600 to 3,000 people).

A household is a group of people residing within a home. Since the average
number of rooms and bedrooms in this dataset are provided per household, these
columns may take surprisingly large values for block groups with few households
and many empty houses, such as vacation resorts.

It can be downloaded/loaded using the
:func:`sklearn.datasets.fetch_california_housing` function.

.. topic:: References

    - Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
      Statistics and Probability Letters, 33 (1997) 291-297
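
A minimal loading sketch (editor's addition, not part of the original
description)::

    from sklearn.datasets import fetch_california_housing

    X, y = fetch_california_housing(return_X_y=True)
    # X.shape == (20640, 8); y is the median house value in units of $100,000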
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/covtype.rst
ADDED
@@ -0,0 +1,30 @@
.. _covtype_dataset:

Forest covertypes
-----------------

The samples in this dataset correspond to 30×30m patches of forest in the US,
collected for the task of predicting each patch's cover type,
i.e. the dominant species of tree.
There are seven covertypes, making this a multiclass classification problem.
Each sample has 54 features, described on the
`dataset's homepage <https://archive.ics.uci.edu/ml/datasets/Covertype>`__.
Some of the features are boolean indicators,
while others are discrete or continuous measurements.

**Data Set Characteristics:**

=================   ============
Classes                        7
Samples total             581012
Dimensionality                54
Features                     int
=================   ============

:func:`sklearn.datasets.fetch_covtype` will load the covertype dataset;
it returns a dictionary-like 'Bunch' object
with the feature matrix in the ``data`` member
and the target values in ``target``. If optional argument 'as_frame' is
set to 'True', it will return ``data`` and ``target`` as pandas
data frame, and there will be an additional member ``frame`` as well.
The dataset will be downloaded from the web if necessary.
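
A minimal loading sketch (editor's addition, not part of the original
description)::

    from sklearn.datasets import fetch_covtype

    bunch = fetch_covtype()
    # bunch.data.shape == (581012, 54); bunch.target holds labels 1..7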
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/iris.rst
ADDED
@@ -0,0 +1,67 @@
.. _iris_dataset:

Iris plants dataset
--------------------

**Data Set Characteristics:**

:Number of Instances: 150 (50 in each of three classes)
:Number of Attributes: 4 numeric, predictive attributes and the class
:Attribute Information:
    - sepal length in cm
    - sepal width in cm
    - petal length in cm
    - petal width in cm
    - class:
            - Iris-Setosa
            - Iris-Versicolour
            - Iris-Virginica

:Summary Statistics:

    ============== ==== ==== ======= ===== ====================
                    Min  Max   Mean    SD   Class Correlation
    ============== ==== ==== ======= ===== ====================
    sepal length:   4.3  7.9   5.84   0.83    0.7826
    sepal width:    2.0  4.4   3.05   0.43   -0.4194
    petal length:   1.0  6.9   3.76   1.76    0.9490  (high!)
    petal width:    0.1  2.5   1.20   0.76    0.9565  (high!)
    ============== ==== ==== ======= ===== ====================

:Missing Attribute Values: None
:Class Distribution: 33.3% for each of 3 classes.
:Creator: R.A. Fisher
:Donor: Michael Marshall (MARSHALL%[email protected])
:Date: July, 1988

The famous Iris database, first used by Sir R.A. Fisher. The dataset is taken
from Fisher's paper. Note that it's the same as in R, but not as in the UCI
Machine Learning Repository, which has two wrong data points.

This is perhaps the best known database to be found in the
pattern recognition literature. Fisher's paper is a classic in the field and
is referenced frequently to this day. (See Duda & Hart, for example.) The
data set contains 3 classes of 50 instances each, where each class refers to a
type of iris plant. One class is linearly separable from the other 2; the
latter are NOT linearly separable from each other.

|details-start|
**References**
|details-split|

- Fisher, R.A. "The use of multiple measurements in taxonomic problems"
  Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions to
  Mathematical Statistics" (John Wiley, NY, 1950).
- Duda, R.O., & Hart, P.E. (1973) Pattern Classification and Scene Analysis.
  (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218.
- Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System
  Structure and Classification Rule for Recognition in Partially Exposed
  Environments". IEEE Transactions on Pattern Analysis and Machine
  Intelligence, Vol. PAMI-2, No. 1, 67-71.
- Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE Transactions
  on Information Theory, May 1972, 431-433.
- See also: 1988 MLC Proceedings, 54-64. Cheeseman et al.'s AUTOCLASS II
  conceptual clustering system finds 3 classes in the data.
- Many, many more ...

|details-end|
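
A minimal loading sketch (editor's addition, not part of the original
description)::

    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    # X.shape == (150, 4); y holds 0 (setosa), 1 (versicolour), 2 (virginica)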
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/kddcup99.rst
ADDED
@@ -0,0 +1,94 @@
.. _kddcup99_dataset:

Kddcup 99 dataset
-----------------

The KDD Cup '99 dataset was created by processing the tcpdump portions
of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
created by MIT Lincoln Lab [2]_. The artificial data (described on the `dataset's
homepage <https://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html>`_) was
generated using a closed network and hand-injected attacks to produce a
large number of different types of attack with normal activity in the
background. As the initial goal was to produce a large training set for
supervised learning algorithms, there is a large proportion (80.1%) of
abnormal data, which is unrealistic in the real world and inappropriate for
unsupervised anomaly detection, which aims at detecting 'abnormal' data, i.e.:

* qualitatively different from normal data
* in large minority among the observations.

We thus transform the KDD data set into two different data sets: SA and SF.

* SA is obtained by simply selecting all the normal data, and a small
  proportion of abnormal data, to give an anomaly proportion of 1%.

* SF is obtained as in [3]_
  by simply picking up the data whose attribute logged_in is positive, thus
  focusing on the intrusion attack, which gives an attack proportion of 0.3%.

* http and smtp are two subsets of SF corresponding to the third feature
  being equal to 'http' (resp. to 'smtp').

General KDD structure:

================ ==========================================
Samples total    4898431
Dimensionality   41
Features         discrete (int) or continuous (float)
Targets          str, 'normal.' or name of the anomaly type
================ ==========================================

SA structure:

================ ==========================================
Samples total    976158
Dimensionality   41
Features         discrete (int) or continuous (float)
Targets          str, 'normal.' or name of the anomaly type
================ ==========================================

SF structure:

================ ==========================================
Samples total    699691
Dimensionality   4
Features         discrete (int) or continuous (float)
Targets          str, 'normal.' or name of the anomaly type
================ ==========================================

http structure:

================ ==========================================
Samples total    619052
Dimensionality   3
Features         discrete (int) or continuous (float)
Targets          str, 'normal.' or name of the anomaly type
================ ==========================================

smtp structure:

================ ==========================================
Samples total    95373
Dimensionality   3
Features         discrete (int) or continuous (float)
Targets          str, 'normal.' or name of the anomaly type
================ ==========================================

:func:`sklearn.datasets.fetch_kddcup99` will load the kddcup99 dataset; it
returns a dictionary-like object with the feature matrix in the ``data`` member
and the target values in ``target``. The ``as_frame`` optional argument converts
``data`` into a pandas DataFrame and ``target`` into a pandas Series. The
dataset will be downloaded from the web if necessary.
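For example, a minimal sketch of loading the SA variant described above (the
data is downloaded on first use; ``percent10=True``, the default, loads a 10%
subsample, so the shape below is indicative of that reduced set)::

    >>> from sklearn.datasets import fetch_kddcup99
    >>> sa = fetch_kddcup99(subset='SA', percent10=True)  # doctest: +SKIP
    >>> sa.data.shape  # doctest: +SKIP
    (100655, 41)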

.. topic:: References

    .. [2] Analysis and Results of the 1999 DARPA Off-Line Intrusion
       Detection Evaluation, Richard Lippmann, Joshua W. Haines,
       David J. Fried, Jonathan Korba, Kumar Das.

    .. [3] K. Yamanishi, J.-I. Takeuchi, G. Williams, and P. Milne. Online
       unsupervised outlier detection using finite mixtures with
       discounting learning algorithms. In Proceedings of the sixth
       ACM SIGKDD international conference on Knowledge discovery
       and data mining, pages 320-324. ACM Press, 2000.
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/lfw.rst
ADDED
@@ -0,0 +1,128 @@
.. _labeled_faces_in_the_wild_dataset:

The Labeled Faces in the Wild face recognition dataset
------------------------------------------------------

This dataset is a collection of JPEG pictures of famous people collected
over the internet; all details are available on the official website:

    http://vis-www.cs.umass.edu/lfw/

Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.

An alternative task, Face Recognition or Face Identification, is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.

Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this
face detector from various online websites.

**Data Set Characteristics:**

================= =======================
Classes           5749
Samples total     13233
Dimensionality    5828
Features          real, between 0 and 255
================= =======================

|details-start|
**Usage**
|details-split|

``scikit-learn`` provides two loaders that will automatically download,
cache, parse the metadata files, decode the JPEG files and convert the
interesting slices into memmapped NumPy arrays. The dataset is more than
200 MB in size. The first load typically takes more than a couple of minutes
to fully decode the relevant part of the JPEG files into NumPy arrays. Once
the dataset has been loaded, subsequent loads take less than 200 ms thanks to
a memmapped version memoized on disk in the ``~/scikit_learn_data/lfw_home/``
folder using ``joblib``.

The first loader is used for the Face Identification task: a multi-class
classification task (hence supervised learning)::

    >>> from sklearn.datasets import fetch_lfw_people
    >>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)

    >>> for name in lfw_people.target_names:
    ...     print(name)
    ...
    Ariel Sharon
    Colin Powell
    Donald Rumsfeld
    George W Bush
    Gerhard Schroeder
    Hugo Chavez
    Tony Blair

The default slice is a rectangular shape around the face, removing
most of the background::

    >>> lfw_people.data.dtype
    dtype('float32')

    >>> lfw_people.data.shape
    (1288, 1850)

    >>> lfw_people.images.shape
    (1288, 50, 37)

Each of the ``1288`` faces is assigned to a single person id in the ``target``
array::

    >>> lfw_people.target.shape
    (1288,)

    >>> list(lfw_people.target[:10])
    [5, 6, 3, 1, 0, 1, 3, 4, 3, 0]

The second loader is typically used for the face verification task: each
sample is a pair of two pictures belonging (or not) to the same person::

    >>> from sklearn.datasets import fetch_lfw_pairs
    >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')

    >>> list(lfw_pairs_train.target_names)
    ['Different persons', 'Same person']

    >>> lfw_pairs_train.pairs.shape
    (2200, 2, 62, 47)

    >>> lfw_pairs_train.data.shape
    (2200, 5828)

    >>> lfw_pairs_train.target.shape
    (2200,)

Both for the :func:`sklearn.datasets.fetch_lfw_people` and
:func:`sklearn.datasets.fetch_lfw_pairs` functions it is
possible to get an additional dimension with the RGB color channels by
passing ``color=True``; in that case the shape will be
``(2200, 2, 62, 47, 3)``.

The :func:`sklearn.datasets.fetch_lfw_pairs` dataset is subdivided into
3 subsets: the development ``train`` set, the development ``test`` set and
an evaluation ``10_folds`` set meant to compute performance metrics using a
10-fold cross-validation scheme.
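For instance, a minimal sketch of fetching the evaluation fold with color
channels (data is downloaded on first use; the shapes shown are indicative,
with the default ``resize`` the 10-fold set holds 6000 pairs)::

    >>> lfw_pairs_10folds = fetch_lfw_pairs(subset='10_folds', color=True)  # doctest: +SKIP
    >>> lfw_pairs_10folds.pairs.shape  # doctest: +SKIP
    (6000, 2, 62, 47, 3)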

|details-end|

.. topic:: References:

  * `Labeled Faces in the Wild: A Database for Studying Face Recognition
    in Unconstrained Environments.
    <http://vis-www.cs.umass.edu/lfw/lfw.pdf>`_
    Gary B. Huang, Manu Ramesh, Tamara Berg, and Erik Learned-Miller.
    University of Massachusetts, Amherst, Technical Report 07-49, October, 2007.

.. topic:: Examples:

  * :ref:`sphx_glr_auto_examples_applications_plot_face_recognition.py`
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/rcv1.rst
ADDED
@@ -0,0 +1,72 @@
.. _rcv1_dataset:

RCV1 dataset
------------

Reuters Corpus Volume I (RCV1) is an archive of over 800,000 manually
categorized newswire stories made available by Reuters, Ltd. for research
purposes. The dataset is extensively described in [1]_.

**Data Set Characteristics:**

============== =====================
Classes        103
Samples total  804414
Dimensionality 47236
Features       real, between 0 and 1
============== =====================

:func:`sklearn.datasets.fetch_rcv1` will load the following
version: RCV1-v2, vectors, full sets, topics multilabels::

    >>> from sklearn.datasets import fetch_rcv1
    >>> rcv1 = fetch_rcv1()

It returns a dictionary-like object, with the following attributes:

``data``:
The feature matrix is a scipy CSR sparse matrix, with 804414 samples and
47236 features. Non-zero values contain cosine-normalized, log TF-IDF vectors.
A nearly chronological split is proposed in [1]_: the first 23149 samples are
the training set and the last 781265 samples are the testing set. This follows
the official LYRL2004 chronological split. The array has 0.16% of non-zero
values::

    >>> rcv1.data.shape
    (804414, 47236)
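A minimal sketch of recovering that chronological train/test split by slicing
(this relies only on the sample ordering described above; 23149 + 781265 =
804414)::

    >>> X_train, X_test = rcv1.data[:23149], rcv1.data[23149:]  # doctest: +SKIP
    >>> X_train.shape, X_test.shape  # doctest: +SKIP
    ((23149, 47236), (781265, 47236))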

``target``:
The target values are stored in a scipy CSR sparse matrix, with 804414 samples
and 103 categories. Each sample has a value of 1 in its categories, and 0 in
the others. The array has 3.15% of non-zero values::

    >>> rcv1.target.shape
    (804414, 103)

``sample_id``:
Each sample can be identified by its ID, ranging (with gaps) from 2286
to 810596::

    >>> rcv1.sample_id[:3]
    array([2286, 2287, 2288], dtype=uint32)

``target_names``:
The target values are the topics of each sample. Each sample belongs to at
least one topic, and to up to 17 topics. There are 103 topics, each
represented by a string. Their corpus frequencies span five orders of
magnitude, from 5 occurrences for 'GMIL' to 381327 for 'CCAT'::

    >>> rcv1.target_names[:3].tolist()  # doctest: +SKIP
    ['E11', 'ECAT', 'M11']

The dataset will be downloaded from the `rcv1 homepage`_ if necessary.
The compressed size is about 656 MB.

.. _rcv1 homepage: http://jmlr.csail.mit.edu/papers/volume5/lewis04a/


.. topic:: References

    .. [1] Lewis, D. D., Yang, Y., Rose, T. G., & Li, F. (2004).
        RCV1: A new benchmark collection for text categorization research.
        The Journal of Machine Learning Research, 5, 361-397.
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/twenty_newsgroups.rst
ADDED
@@ -0,0 +1,264 @@
.. _20newsgroups_dataset:

The 20 newsgroups text dataset
------------------------------

The 20 newsgroups dataset comprises around 18000 newsgroups posts on
20 topics split in two subsets: one for training (or development)
and the other one for testing (or for performance evaluation). The split
between the train and test set is based upon messages posted before
and after a specific date.

This module contains two loaders. The first one,
:func:`sklearn.datasets.fetch_20newsgroups`,
returns a list of the raw texts that can be fed to text feature
extractors such as :class:`~sklearn.feature_extraction.text.CountVectorizer`
with custom parameters so as to extract feature vectors.
The second one, :func:`sklearn.datasets.fetch_20newsgroups_vectorized`,
returns ready-to-use features, i.e., it is not necessary to use a feature
extractor.

**Data Set Characteristics:**

================= ==========
Classes           20
Samples total     18846
Dimensionality    1
Features          text
================= ==========

|details-start|
**Usage**
|details-split|

The :func:`sklearn.datasets.fetch_20newsgroups` function is a data
fetching / caching function that downloads the data archive from
the original `20 newsgroups website`_, extracts the archive contents
in the ``~/scikit_learn_data/20news_home`` folder and calls
:func:`sklearn.datasets.load_files` on either the training or
testing set folder, or both of them::

    >>> from sklearn.datasets import fetch_20newsgroups
    >>> newsgroups_train = fetch_20newsgroups(subset='train')

    >>> from pprint import pprint
    >>> pprint(list(newsgroups_train.target_names))
    ['alt.atheism',
     'comp.graphics',
     'comp.os.ms-windows.misc',
     'comp.sys.ibm.pc.hardware',
     'comp.sys.mac.hardware',
     'comp.windows.x',
     'misc.forsale',
     'rec.autos',
     'rec.motorcycles',
     'rec.sport.baseball',
     'rec.sport.hockey',
     'sci.crypt',
     'sci.electronics',
     'sci.med',
     'sci.space',
     'soc.religion.christian',
     'talk.politics.guns',
     'talk.politics.mideast',
     'talk.politics.misc',
     'talk.religion.misc']

The real data lies in the ``filenames`` and ``target`` attributes. The target
attribute is the integer index of the category::

    >>> newsgroups_train.filenames.shape
    (11314,)
    >>> newsgroups_train.target.shape
    (11314,)
    >>> newsgroups_train.target[:10]
    array([ 7,  4,  4,  1, 14, 16, 13,  3,  2,  4])

It is possible to load only a sub-selection of the categories by passing the
list of the categories to load to the
:func:`sklearn.datasets.fetch_20newsgroups` function::

    >>> cats = ['alt.atheism', 'sci.space']
    >>> newsgroups_train = fetch_20newsgroups(subset='train', categories=cats)

    >>> list(newsgroups_train.target_names)
    ['alt.atheism', 'sci.space']
    >>> newsgroups_train.filenames.shape
    (1073,)
    >>> newsgroups_train.target.shape
    (1073,)
    >>> newsgroups_train.target[:10]
    array([0, 1, 1, 1, 0, 1, 1, 0, 0, 0])

|details-end|

|details-start|
**Converting text to vectors**
|details-split|

In order to feed predictive or clustering models with the text data,
one first needs to turn the text into vectors of numerical values suitable
for statistical analysis. This can be achieved with the utilities of the
``sklearn.feature_extraction.text`` module, as demonstrated in the following
example that extracts `TF-IDF`_ vectors of unigram tokens
from a subset of 20news::

    >>> from sklearn.feature_extraction.text import TfidfVectorizer
    >>> categories = ['alt.atheism', 'talk.religion.misc',
    ...               'comp.graphics', 'sci.space']
    >>> newsgroups_train = fetch_20newsgroups(subset='train',
    ...                                       categories=categories)
    >>> vectorizer = TfidfVectorizer()
    >>> vectors = vectorizer.fit_transform(newsgroups_train.data)
    >>> vectors.shape
    (2034, 34118)

The extracted TF-IDF vectors are very sparse, with an average of 159 non-zero
components per sample in a more than 30000-dimensional space
(less than .5% non-zero features)::

    >>> vectors.nnz / float(vectors.shape[0])
    159.01327...

:func:`sklearn.datasets.fetch_20newsgroups_vectorized` is a function which
returns ready-to-use token count features instead of file names.
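A minimal sketch of the vectorized loader (the returned ``data`` is a sparse
matrix of token counts over the full vocabulary; the shape shown is
indicative)::

    >>> from sklearn.datasets import fetch_20newsgroups_vectorized
    >>> newsgroups_vec = fetch_20newsgroups_vectorized(subset='train')  # doctest: +SKIP
    >>> newsgroups_vec.data.shape  # doctest: +SKIP
    (11314, 130107)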

.. _`20 newsgroups website`: http://people.csail.mit.edu/jrennie/20Newsgroups/
.. _`TF-IDF`: https://en.wikipedia.org/wiki/Tf-idf

|details-end|

|details-start|
**Filtering text for more realistic training**
|details-split|

It is easy for a classifier to overfit on particular things that appear in the
20 Newsgroups data, such as newsgroup headers. Many classifiers achieve very
high F-scores, but their results would not generalize to other documents that
aren't from this window of time.

For example, let's look at the results of a multinomial Naive Bayes classifier,
which is fast to train and achieves a decent F-score::

    >>> from sklearn.naive_bayes import MultinomialNB
    >>> from sklearn import metrics
    >>> newsgroups_test = fetch_20newsgroups(subset='test',
    ...                                      categories=categories)
    >>> vectors_test = vectorizer.transform(newsgroups_test.data)
    >>> clf = MultinomialNB(alpha=.01)
    >>> clf.fit(vectors, newsgroups_train.target)
    MultinomialNB(alpha=0.01, class_prior=None, fit_prior=True)

    >>> pred = clf.predict(vectors_test)
    >>> metrics.f1_score(newsgroups_test.target, pred, average='macro')
    0.88213...

(The example :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` shuffles
the training and test data, instead of segmenting by time, and in that case
multinomial Naive Bayes gets a much higher F-score of 0.88. Are you suspicious
yet of what's going on inside this classifier?)

Let's take a look at what the most informative features are:

    >>> import numpy as np
    >>> def show_top10(classifier, vectorizer, categories):
    ...     feature_names = vectorizer.get_feature_names_out()
    ...     for i, category in enumerate(categories):
    ...         top10 = np.argsort(classifier.coef_[i])[-10:]
    ...         print("%s: %s" % (category, " ".join(feature_names[top10])))
    ...
    >>> show_top10(clf, vectorizer, newsgroups_train.target_names)
    alt.atheism: edu it and in you that is of to the
    comp.graphics: edu in graphics it is for and of to the
    sci.space: edu it that is in and space to of the
    talk.religion.misc: not it you in is that and to of the


You can now see many things that these features have overfit to:

- Almost every group is distinguished by whether headers such as
  ``NNTP-Posting-Host:`` and ``Distribution:`` appear more or less often.
- Another significant feature involves whether the sender is affiliated with
  a university, as indicated either by their headers or their signature.
- The word "article" is a significant feature, based on how often people quote
  previous posts like this: "In article [article ID], [name] <[e-mail address]>
  wrote:"
- Other features match the names and e-mail addresses of particular people who
  were posting at the time.

With such an abundance of clues that distinguish newsgroups, the classifiers
barely have to identify topics from text at all, and they all perform at the
same high level.

For this reason, the functions that load 20 Newsgroups data provide a
parameter called **remove**, telling it what kinds of information to strip out
of each file. **remove** should be a tuple containing any subset of
``('headers', 'footers', 'quotes')``, telling it to remove headers, signature
blocks, and quotation blocks respectively.

    >>> newsgroups_test = fetch_20newsgroups(subset='test',
    ...                                      remove=('headers', 'footers', 'quotes'),
    ...                                      categories=categories)
    >>> vectors_test = vectorizer.transform(newsgroups_test.data)
    >>> pred = clf.predict(vectors_test)
    >>> metrics.f1_score(pred, newsgroups_test.target, average='macro')
    0.77310...

This classifier lost a lot of its F-score, just because we removed
metadata that has little to do with topic classification.
It loses even more if we also strip this metadata from the training data:

    >>> newsgroups_train = fetch_20newsgroups(subset='train',
    ...                                       remove=('headers', 'footers', 'quotes'),
    ...                                       categories=categories)
    >>> vectors = vectorizer.fit_transform(newsgroups_train.data)
    >>> clf = MultinomialNB(alpha=.01)
    >>> clf.fit(vectors, newsgroups_train.target)
    MultinomialNB(alpha=0.01, class_prior=None, fit_prior=True)

    >>> vectors_test = vectorizer.transform(newsgroups_test.data)
    >>> pred = clf.predict(vectors_test)
    >>> metrics.f1_score(newsgroups_test.target, pred, average='macro')
    0.76995...

Some other classifiers cope better with this harder version of the task. Try the
:ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_text_feature_extraction.py`
example with and without the ``remove`` option to compare the results.

|details-end|

.. topic:: Data Considerations

  The Cleveland Indians is a major league baseball team based in Cleveland,
  Ohio, USA. In December 2020, it was reported that "After several months of
  discussion sparked by the death of George Floyd and a national reckoning over
  race and colonialism, the Cleveland Indians have decided to change their
  name." Team owner Paul Dolan "did make it clear that the team will not make
  its informal nickname -- the Tribe -- its new team name." "It's not going to
  be a half-step away from the Indians," Dolan said. "We will not have a Native
  American-themed name."

  https://www.mlb.com/news/cleveland-indians-team-name-change

.. topic:: Recommendation

  - When evaluating text classifiers on the 20 Newsgroups data, you
    should strip newsgroup-related metadata. In scikit-learn, you can do this
    by setting ``remove=('headers', 'footers', 'quotes')``. The F-score will be
    lower because it is more realistic.
  - This text dataset contains data which may be inappropriate for certain NLP
    applications. An example is listed in the "Data Considerations" section
    above. The challenge with using current text datasets in NLP for tasks such
    as sentence completion, clustering, and other applications is that text
    that is culturally biased and inflammatory will propagate biases. This
    should be taken into consideration when using the dataset, reviewing the
    output, and the bias should be documented.

.. topic:: Examples

  * :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_text_feature_extraction.py`

  * :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`

  * :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`

  * :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (178 Bytes)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc
ADDED
Binary file (15.4 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc
ADDED
Binary file (3.22 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc
ADDED
Binary file (29.9 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc
ADDED
Binary file (1.25 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc
ADDED
Binary file (29 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc
ADDED
Binary file (522 Bytes)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc
ADDED
Binary file (15.9 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc
ADDED
Binary file (5.7 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc
ADDED
Binary file (16.5 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc
ADDED
Binary file (8.12 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc
ADDED
Binary file (4.34 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc
ADDED
Binary file (18.5 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc
ADDED
Binary file (484 Bytes)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc
ADDED
Binary file (18.6 kB)
env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc
ADDED
Binary file (13.5 kB)