diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae7902f3365c6087c9c662f2c6d676c9a0e4254 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__init__.py @@ -0,0 +1,162 @@ +""" +The :mod:`sklearn.datasets` module includes utilities to load datasets, +including methods to load and fetch popular reference datasets. It also +features some artificial data generators. +""" +import textwrap + +from ._base import ( + clear_data_home, + get_data_home, + load_breast_cancer, + load_diabetes, + load_digits, + load_files, + load_iris, + load_linnerud, + load_sample_image, + load_sample_images, + load_wine, +) +from ._california_housing import fetch_california_housing +from ._covtype import fetch_covtype +from ._kddcup99 import fetch_kddcup99 +from ._lfw import fetch_lfw_pairs, fetch_lfw_people +from ._olivetti_faces import fetch_olivetti_faces +from ._openml import fetch_openml +from ._rcv1 import fetch_rcv1 +from ._samples_generator import ( + make_biclusters, + make_blobs, + make_checkerboard, + make_circles, + make_classification, + make_friedman1, + make_friedman2, + make_friedman3, + make_gaussian_quantiles, + make_hastie_10_2, + make_low_rank_matrix, + make_moons, + make_multilabel_classification, + make_regression, + make_s_curve, + make_sparse_coded_signal, + make_sparse_spd_matrix, + make_sparse_uncorrelated, + make_spd_matrix, + make_swiss_roll, +) +from ._species_distributions import fetch_species_distributions +from ._svmlight_format_io import ( + dump_svmlight_file, + load_svmlight_file, + load_svmlight_files, +) +from ._twenty_newsgroups import fetch_20newsgroups, fetch_20newsgroups_vectorized + +__all__ = [ + "clear_data_home", + "dump_svmlight_file", + "fetch_20newsgroups", + "fetch_20newsgroups_vectorized", + "fetch_lfw_pairs", + "fetch_lfw_people", + "fetch_olivetti_faces", + "fetch_species_distributions", + "fetch_california_housing", + "fetch_covtype", + "fetch_rcv1", + "fetch_kddcup99", + "fetch_openml", + "get_data_home", + "load_diabetes", + "load_digits", + "load_files", + "load_iris", + "load_breast_cancer", + "load_linnerud", + "load_sample_image", + "load_sample_images", + "load_svmlight_file", + "load_svmlight_files", + "load_wine", + "make_biclusters", + "make_blobs", + "make_circles", + "make_classification", + "make_checkerboard", + "make_friedman1", + "make_friedman2", + "make_friedman3", + "make_gaussian_quantiles", + "make_hastie_10_2", + "make_low_rank_matrix", + "make_moons", + "make_multilabel_classification", + "make_regression", + "make_s_curve", + "make_sparse_coded_signal", + "make_sparse_spd_matrix", + "make_sparse_uncorrelated", + "make_spd_matrix", + "make_swiss_roll", +] + + +def __getattr__(name): + if name == "load_boston": + msg = textwrap.dedent(""" + `load_boston` has been removed from scikit-learn since version 1.2. + + The Boston housing prices dataset has an ethical problem: as + investigated in [1], the authors of this dataset engineered a + non-invertible variable "B" assuming that racial self-segregation had a + positive impact on house prices [2]. Furthermore the goal of the + research that led to the creation of this dataset was to study the + impact of air quality but it did not give adequate demonstration of the + validity of this assumption. 
+ + The scikit-learn maintainers therefore strongly discourage the use of + this dataset unless the purpose of the code is to study and educate + about ethical issues in data science and machine learning. + + In this special case, you can fetch the dataset from the original + source:: + + import pandas as pd + import numpy as np + + data_url = "http://lib.stat.cmu.edu/datasets/boston" + raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None) + data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]]) + target = raw_df.values[1::2, 2] + + Alternative datasets include the California housing dataset and the + Ames housing dataset. You can load the datasets as follows:: + + from sklearn.datasets import fetch_california_housing + housing = fetch_california_housing() + + for the California housing dataset and:: + + from sklearn.datasets import fetch_openml + housing = fetch_openml(name="house_prices", as_frame=True) + + for the Ames housing dataset. + + [1] M Carlisle. + "Racist data destruction?" + + + [2] Harrison Jr, David, and Daniel L. Rubinfeld. + "Hedonic housing prices and the demand for clean air." + Journal of environmental economics and management 5.1 (1978): 81-102. + + """) + raise ImportError(msg) + try: + return globals()[name] + except KeyError: + # This is turned into the appropriate ImportError + raise AttributeError diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9487b42ef5847352f0d93be33a8a9d36a79e750e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_arff_parser.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_arff_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1accac9a2f9e0ed1068d1f4036d26370abaacb1e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_arff_parser.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..784effc6632409d4a647a9628629e24c2e5cdb66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_california_housing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_california_housing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..419c1da1b292cc09db4681232ea6c42729f4bf39 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_california_housing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_covtype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_covtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbb1a72abdd0d6fbc12514a0aedbc51d5502769a Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_covtype.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_kddcup99.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_kddcup99.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9de0ce56b1969c29db8474788c0deb3ca8315fb9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_kddcup99.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_openml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_openml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f18ccf43634c0f12d45b13cab22823230ae7f213 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_openml.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_rcv1.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_rcv1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef689cabb1524d8378843a7159a2b5134b37195a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_rcv1.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_species_distributions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_species_distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7fba2009ddd321247a2a274c1bee022c16ec791 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_species_distributions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_svmlight_format_io.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_svmlight_format_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4c1adda7367b849208936dd83c6ca2aef8f3b66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_svmlight_format_io.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_twenty_newsgroups.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_twenty_newsgroups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73c83a9d126dd6e9c50e84e90c50c2cbf0df174c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/__pycache__/_twenty_newsgroups.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..6ce1a5d3e3d0c9b1f993ca98cbc05f639e47bb14 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py @@ -0,0 +1,542 @@ +"""Implementation of ARFF parsers: via LIAC-ARFF and pandas.""" +import itertools +import re +from collections import OrderedDict +from collections.abc import Generator +from typing import List + +import numpy as np +import scipy as sp + +from ..externals import _arff +from ..externals._arff 
import ArffSparseDataType +from ..utils import ( + _chunk_generator, + check_pandas_support, + get_chunk_n_rows, +) +from ..utils.fixes import pd_fillna + + +def _split_sparse_columns( + arff_data: ArffSparseDataType, include_columns: List +) -> ArffSparseDataType: + """Obtains several columns from sparse ARFF representation. Additionally, + the column indices are re-labelled, given the columns that are not + included. (e.g., when including [1, 2, 3], the columns will be relabelled + to [0, 1, 2]). + + Parameters + ---------- + arff_data : tuple + A tuple of three lists of equal size; first list indicating the value, + second the x coordinate and the third the y coordinate. + + include_columns : list + A list of columns to include. + + Returns + ------- + arff_data_new : tuple + Subset of arff data with only the include columns indicated by the + include_columns argument. + """ + arff_data_new: ArffSparseDataType = (list(), list(), list()) + reindexed_columns = { + column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) + } + for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): + if col_idx in include_columns: + arff_data_new[0].append(val) + arff_data_new[1].append(row_idx) + arff_data_new[2].append(reindexed_columns[col_idx]) + return arff_data_new + + +def _sparse_data_to_array( + arff_data: ArffSparseDataType, include_columns: List +) -> np.ndarray: + # turns the sparse data back into an array (can't use toarray() function, + # as this does only work on numeric data) + num_obs = max(arff_data[1]) + 1 + y_shape = (num_obs, len(include_columns)) + reindexed_columns = { + column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) + } + # TODO: improve for efficiency + y = np.empty(y_shape, dtype=np.float64) + for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): + if col_idx in include_columns: + y[row_idx, reindexed_columns[col_idx]] = val + return y + + +def _post_process_frame(frame, feature_names, target_names): + """Post process a dataframe to select the desired columns in `X` and `y`. + + Parameters + ---------- + frame : dataframe + The dataframe to split into `X` and `y`. + + feature_names : list of str + The list of feature names to populate `X`. + + target_names : list of str + The list of target names to populate `y`. + + Returns + ------- + X : dataframe + The dataframe containing the features. + + y : {series, dataframe} or None + The series or dataframe containing the target. + """ + X = frame[feature_names] + if len(target_names) >= 2: + y = frame[target_names] + elif len(target_names) == 1: + y = frame[target_names[0]] + else: + y = None + return X, y + + +def _liac_arff_parser( + gzip_file, + output_arrays_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + shape=None, +): + """ARFF parser using the LIAC-ARFF library coded purely in Python. + + This parser is quite slow but consumes a generator. Currently it is needed + to parse sparse datasets. For dense datasets, it is recommended to instead + use the pandas-based parser, although it does not always handles the + dtypes exactly the same. + + Parameters + ---------- + gzip_file : GzipFile instance + The file compressed to be read. + + output_arrays_type : {"numpy", "sparse", "pandas"} + The type of the arrays that will be returned. 
The possibilities ara: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + A list of the feature names to be selected. + + target_names_to_select : list of str + A list of the target names to be selected. + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + + def _io_to_generator(gzip_file): + for line in gzip_file: + yield line.decode("utf-8") + + stream = _io_to_generator(gzip_file) + + # find which type (dense or sparse) ARFF type we will have to deal with + return_type = _arff.COO if output_arrays_type == "sparse" else _arff.DENSE_GEN + # we should not let LIAC-ARFF to encode the nominal attributes with NumPy + # arrays to have only numerical values. + encode_nominal = not (output_arrays_type == "pandas") + arff_container = _arff.load( + stream, return_type=return_type, encode_nominal=encode_nominal + ) + columns_to_select = feature_names_to_select + target_names_to_select + + categories = { + name: cat + for name, cat in arff_container["attributes"] + if isinstance(cat, list) and name in columns_to_select + } + if output_arrays_type == "pandas": + pd = check_pandas_support("fetch_openml with as_frame=True") + + columns_info = OrderedDict(arff_container["attributes"]) + columns_names = list(columns_info.keys()) + + # calculate chunksize + first_row = next(arff_container["data"]) + first_df = pd.DataFrame([first_row], columns=columns_names, copy=False) + + row_bytes = first_df.memory_usage(deep=True).sum() + chunksize = get_chunk_n_rows(row_bytes) + + # read arff data with chunks + columns_to_keep = [col for col in columns_names if col in columns_to_select] + dfs = [first_df[columns_to_keep]] + for data in _chunk_generator(arff_container["data"], chunksize): + dfs.append( + pd.DataFrame(data, columns=columns_names, copy=False)[columns_to_keep] + ) + # dfs[0] contains only one row, which may not have enough data to infer to + # column's dtype. Here we use `dfs[1]` to configure the dtype in dfs[0] + if len(dfs) >= 2: + dfs[0] = dfs[0].astype(dfs[1].dtypes) + + # liac-arff parser does not depend on NumPy and uses None to represent + # missing values. To be consistent with the pandas parser, we replace + # None with np.nan. + frame = pd.concat(dfs, ignore_index=True) + frame = pd_fillna(pd, frame) + del dfs, first_df + + # cast the columns frame + dtypes = {} + for name in frame.columns: + column_dtype = openml_columns_info[name]["data_type"] + if column_dtype.lower() == "integer": + # Use a pandas extension array instead of np.int64 to be able + # to support missing values. 
+ dtypes[name] = "Int64" + elif column_dtype.lower() == "nominal": + dtypes[name] = "category" + else: + dtypes[name] = frame.dtypes[name] + frame = frame.astype(dtypes) + + X, y = _post_process_frame( + frame, feature_names_to_select, target_names_to_select + ) + else: + arff_data = arff_container["data"] + + feature_indices_to_select = [ + int(openml_columns_info[col_name]["index"]) + for col_name in feature_names_to_select + ] + target_indices_to_select = [ + int(openml_columns_info[col_name]["index"]) + for col_name in target_names_to_select + ] + + if isinstance(arff_data, Generator): + if shape is None: + raise ValueError( + "shape must be provided when arr['data'] is a Generator" + ) + if shape[0] == -1: + count = -1 + else: + count = shape[0] * shape[1] + data = np.fromiter( + itertools.chain.from_iterable(arff_data), + dtype="float64", + count=count, + ) + data = data.reshape(*shape) + X = data[:, feature_indices_to_select] + y = data[:, target_indices_to_select] + elif isinstance(arff_data, tuple): + arff_data_X = _split_sparse_columns(arff_data, feature_indices_to_select) + num_obs = max(arff_data[1]) + 1 + X_shape = (num_obs, len(feature_indices_to_select)) + X = sp.sparse.coo_matrix( + (arff_data_X[0], (arff_data_X[1], arff_data_X[2])), + shape=X_shape, + dtype=np.float64, + ) + X = X.tocsr() + y = _sparse_data_to_array(arff_data, target_indices_to_select) + else: + # This should never happen + raise ValueError( + f"Unexpected type for data obtained from arff: {type(arff_data)}" + ) + + is_classification = { + col_name in categories for col_name in target_names_to_select + } + if not is_classification: + # No target + pass + elif all(is_classification): + y = np.hstack( + [ + np.take( + np.asarray(categories.pop(col_name), dtype="O"), + y[:, i : i + 1].astype(int, copy=False), + ) + for i, col_name in enumerate(target_names_to_select) + ] + ) + elif any(is_classification): + raise ValueError( + "Mix of nominal and non-nominal targets is not currently supported" + ) + + # reshape y back to 1-D array, if there is only 1 target column; + # back to None if there are not target columns + if y.shape[1] == 1: + y = y.reshape((-1,)) + elif y.shape[1] == 0: + y = None + + if output_arrays_type == "pandas": + return X, y, frame, None + return X, y, None, categories + + +def _pandas_arff_parser( + gzip_file, + output_arrays_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + read_csv_kwargs=None, +): + """ARFF parser using `pandas.read_csv`. + + This parser uses the metadata fetched directly from OpenML and skips the metadata + headers of ARFF file itself. The data is loaded as a CSV file. + + Parameters + ---------- + gzip_file : GzipFile instance + The GZip compressed file with the ARFF formatted payload. + + output_arrays_type : {"numpy", "sparse", "pandas"} + The type of the arrays that will be returned. The possibilities are: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + openml_columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + A list of the feature names to be selected to build `X`. + + target_names_to_select : list of str + A list of the target names to be selected to build `y`. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv`. 
It allows to overwrite + the default options. + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + import pandas as pd + + # read the file until the data section to skip the ARFF metadata headers + for line in gzip_file: + if line.decode("utf-8").lower().startswith("@data"): + break + + dtypes = {} + for name in openml_columns_info: + column_dtype = openml_columns_info[name]["data_type"] + if column_dtype.lower() == "integer": + # Use Int64 to infer missing values from data + # XXX: this line is not covered by our tests. Is this really needed? + dtypes[name] = "Int64" + elif column_dtype.lower() == "nominal": + dtypes[name] = "category" + # since we will not pass `names` when reading the ARFF file, we need to translate + # `dtypes` from column names to column indices to pass to `pandas.read_csv` + dtypes_positional = { + col_idx: dtypes[name] + for col_idx, name in enumerate(openml_columns_info) + if name in dtypes + } + + default_read_csv_kwargs = { + "header": None, + "index_col": False, # always force pandas to not use the first column as index + "na_values": ["?"], # missing values are represented by `?` + "keep_default_na": False, # only `?` is a missing value given the ARFF specs + "comment": "%", # skip line starting by `%` since they are comments + "quotechar": '"', # delimiter to use for quoted strings + "skipinitialspace": True, # skip spaces after delimiter to follow ARFF specs + "escapechar": "\\", + "dtype": dtypes_positional, + } + read_csv_kwargs = {**default_read_csv_kwargs, **(read_csv_kwargs or {})} + frame = pd.read_csv(gzip_file, **read_csv_kwargs) + try: + # Setting the columns while reading the file will select the N first columns + # and not raise a ParserError. Instead, we set the columns after reading the + # file and raise a ParserError if the number of columns does not match the + # number of columns in the metadata given by OpenML. + frame.columns = [name for name in openml_columns_info] + except ValueError as exc: + raise pd.errors.ParserError( + "The number of columns provided by OpenML does not match the number of " + "columns inferred by pandas when reading the file." + ) from exc + + columns_to_select = feature_names_to_select + target_names_to_select + columns_to_keep = [col for col in frame.columns if col in columns_to_select] + frame = frame[columns_to_keep] + + # `pd.read_csv` automatically handles double quotes for quoting non-numeric + # CSV cell values. Contrary to LIAC-ARFF, `pd.read_csv` cannot be configured to + # consider either single quotes and double quotes as valid quoting chars at + # the same time since this case does not occur in regular (non-ARFF) CSV files. + # To mimic the behavior of LIAC-ARFF parser, we manually strip single quotes + # on categories as a post-processing steps if needed. + # + # Note however that we intentionally do not attempt to do this kind of manual + # post-processing of (non-categorical) string-typed columns because we cannot + # resolve the ambiguity of the case of CSV cell with nesting quoting such as + # `"'some string value'"` with pandas. 
+ single_quote_pattern = re.compile(r"^'(?P.*)'$") + + def strip_single_quotes(input_string): + match = re.search(single_quote_pattern, input_string) + if match is None: + return input_string + + return match.group("contents") + + categorical_columns = [ + name + for name, dtype in frame.dtypes.items() + if isinstance(dtype, pd.CategoricalDtype) + ] + for col in categorical_columns: + frame[col] = frame[col].cat.rename_categories(strip_single_quotes) + + X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select) + + if output_arrays_type == "pandas": + return X, y, frame, None + else: + X, y = X.to_numpy(), y.to_numpy() + + categories = { + name: dtype.categories.tolist() + for name, dtype in frame.dtypes.items() + if isinstance(dtype, pd.CategoricalDtype) + } + return X, y, None, categories + + +def load_arff_from_gzip_file( + gzip_file, + parser, + output_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + shape=None, + read_csv_kwargs=None, +): + """Load a compressed ARFF file using a given parser. + + Parameters + ---------- + gzip_file : GzipFile instance + The file compressed to be read. + + parser : {"pandas", "liac-arff"} + The parser used to parse the ARFF file. "pandas" is recommended + but only supports loading dense datasets. + + output_type : {"numpy", "sparse", "pandas"} + The type of the arrays that will be returned. The possibilities ara: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + openml_columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + A list of the feature names to be selected. + + target_names_to_select : list of str + A list of the target names to be selected. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite + the default options. + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + if parser == "liac-arff": + return _liac_arff_parser( + gzip_file, + output_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + shape, + ) + elif parser == "pandas": + return _pandas_arff_parser( + gzip_file, + output_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + read_csv_kwargs, + ) + else: + raise ValueError( + f"Unknown parser: '{parser}'. Should be 'liac-arff' or 'pandas'." 
+ ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e055b47ab13a2edceb0027407519ab4fc0dc8766 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_base.py @@ -0,0 +1,1441 @@ +""" +Base IO code for all datasets +""" + +# Copyright (c) 2007 David Cournapeau +# 2010 Fabian Pedregosa +# 2010 Olivier Grisel +# License: BSD 3 clause +import csv +import gzip +import hashlib +import os +import shutil +from collections import namedtuple +from importlib import resources +from numbers import Integral +from os import environ, listdir, makedirs +from os.path import expanduser, isdir, join, splitext +from pathlib import Path +from urllib.request import urlretrieve + +import numpy as np + +from ..preprocessing import scale +from ..utils import Bunch, check_pandas_support, check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params + +DATA_MODULE = "sklearn.datasets.data" +DESCR_MODULE = "sklearn.datasets.descr" +IMAGES_MODULE = "sklearn.datasets.images" + +RemoteFileMetadata = namedtuple("RemoteFileMetadata", ["filename", "url", "checksum"]) + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + }, + prefer_skip_nested_validation=True, +) +def get_data_home(data_home=None) -> str: + """Return the path of the scikit-learn data directory. + + This folder is used by some large dataset loaders to avoid downloading the + data several times. + + By default the data directory is set to a folder named 'scikit_learn_data' in the + user home folder. + + Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment + variable or programmatically by giving an explicit folder path. The '~' + symbol is expanded to the user home folder. + + If the folder does not already exist, it is automatically created. + + Parameters + ---------- + data_home : str or path-like, default=None + The path to scikit-learn data directory. If `None`, the default path + is `~/scikit_learn_data`. + + Returns + ------- + data_home: str + The path to scikit-learn data directory. + """ + if data_home is None: + data_home = environ.get("SCIKIT_LEARN_DATA", join("~", "scikit_learn_data")) + data_home = expanduser(data_home) + makedirs(data_home, exist_ok=True) + return data_home + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + }, + prefer_skip_nested_validation=True, +) +def clear_data_home(data_home=None): + """Delete all the content of the data home cache. + + Parameters + ---------- + data_home : str or path-like, default=None + The path to scikit-learn data directory. If `None`, the default path + is `~/scikit_learn_data`. 
+ + Examples + -------- + >>> from sklearn.datasets import clear_data_home + >>> clear_data_home() # doctest: +SKIP + """ + data_home = get_data_home(data_home) + shutil.rmtree(data_home) + + +def _convert_data_dataframe( + caller_name, data, target, feature_names, target_names, sparse_data=False +): + pd = check_pandas_support("{} with as_frame=True".format(caller_name)) + if not sparse_data: + data_df = pd.DataFrame(data, columns=feature_names, copy=False) + else: + data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names) + + target_df = pd.DataFrame(target, columns=target_names) + combined_df = pd.concat([data_df, target_df], axis=1) + X = combined_df[feature_names] + y = combined_df[target_names] + if y.shape[1] == 1: + y = y.iloc[:, 0] + return combined_df, X, y + + +@validate_params( + { + "container_path": [str, os.PathLike], + "description": [str, None], + "categories": [list, None], + "load_content": ["boolean"], + "shuffle": ["boolean"], + "encoding": [str, None], + "decode_error": [StrOptions({"strict", "ignore", "replace"})], + "random_state": ["random_state"], + "allowed_extensions": [list, None], + }, + prefer_skip_nested_validation=True, +) +def load_files( + container_path, + *, + description=None, + categories=None, + load_content=True, + shuffle=True, + encoding=None, + decode_error="strict", + random_state=0, + allowed_extensions=None, +): + """Load text files with categories as subfolder names. + + Individual samples are assumed to be files stored a two levels folder + structure such as the following: + + container_folder/ + category_1_folder/ + file_1.txt + file_2.txt + ... + file_42.txt + category_2_folder/ + file_43.txt + file_44.txt + ... + + The folder names are used as supervised signal label names. The individual + file names are not important. + + This function does not try to extract features into a numpy array or scipy + sparse matrix. In addition, if load_content is false it does not try to + load the files in memory. + + To use text files in a scikit-learn classification or clustering algorithm, + you will need to use the :mod:`~sklearn.feature_extraction.text` module to + build a feature extraction transformer that suits your problem. + + If you set load_content=True, you should also specify the encoding of the + text using the 'encoding' parameter. For many modern text files, 'utf-8' + will be the correct encoding. If you leave encoding equal to None, then the + content will be made of bytes instead of Unicode, and you will not be able + to use most functions in :mod:`~sklearn.feature_extraction.text`. + + Similar feature extractors should be built for other kind of unstructured + data input such as images, audio, video, ... + + If you want files with a specific file extension (e.g. `.txt`) then you + can pass a list of those file extensions to `allowed_extensions`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + container_path : str + Path to the main folder holding one subfolder per category. + + description : str, default=None + A paragraph describing the characteristic of the dataset: its source, + reference, etc. + + categories : list of str, default=None + If None (default), load all the categories. If not None, list of + category names to load (other categories ignored). + + load_content : bool, default=True + Whether to load or not the content of the different files. If true a + 'data' attribute containing the text information is present in the data + structure returned. 
If not, a filenames attribute gives the path to the + files. + + shuffle : bool, default=True + Whether or not to shuffle the data: might be important for models that + make the assumption that the samples are independent and identically + distributed (i.i.d.), such as stochastic gradient descent. + + encoding : str, default=None + If None, do not try to decode the content of the files (e.g. for images + or other non-text content). If not None, encoding to use to decode text + files to Unicode if load_content is True. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. Passed as keyword + argument 'errors' to bytes.decode. + + random_state : int, RandomState instance or None, default=0 + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + allowed_extensions : list of str, default=None + List of desired file extensions to filter the files to be loaded. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : list of str + Only present when `load_content=True`. + The raw text data to learn. + target : ndarray + The target labels (integer index). + target_names : list + The names of target classes. + DESCR : str + The full description of the dataset. + filenames: ndarray + The filenames holding the dataset. + + Examples + -------- + >>> from sklearn.datasets import load_files + >>> container_path = "./" + >>> load_files(container_path) # doctest: +SKIP + """ + + target = [] + target_names = [] + filenames = [] + + folders = [ + f for f in sorted(listdir(container_path)) if isdir(join(container_path, f)) + ] + + if categories is not None: + folders = [f for f in folders if f in categories] + + if allowed_extensions is not None: + allowed_extensions = frozenset(allowed_extensions) + + for label, folder in enumerate(folders): + target_names.append(folder) + folder_path = join(container_path, folder) + files = sorted(listdir(folder_path)) + if allowed_extensions is not None: + documents = [ + join(folder_path, file) + for file in files + if os.path.splitext(file)[1] in allowed_extensions + ] + else: + documents = [join(folder_path, file) for file in files] + target.extend(len(documents) * [label]) + filenames.extend(documents) + + # convert to array for fancy indexing + filenames = np.array(filenames) + target = np.array(target) + + if shuffle: + random_state = check_random_state(random_state) + indices = np.arange(filenames.shape[0]) + random_state.shuffle(indices) + filenames = filenames[indices] + target = target[indices] + + if load_content: + data = [] + for filename in filenames: + data.append(Path(filename).read_bytes()) + if encoding is not None: + data = [d.decode(encoding, decode_error) for d in data] + return Bunch( + data=data, + filenames=filenames, + target_names=target_names, + target=target, + DESCR=description, + ) + + return Bunch( + filenames=filenames, target_names=target_names, target=target, DESCR=description + ) + + +def load_csv_data( + data_file_name, + *, + data_module=DATA_MODULE, + descr_file_name=None, + descr_module=DESCR_MODULE, + encoding="utf-8", +): + """Loads `data_file_name` from `data_module with `importlib.resources`. + + Parameters + ---------- + data_file_name : str + Name of csv file to be loaded from `data_module/data_file_name`. 
+ For example `'wine_data.csv'`. + + data_module : str or module, default='sklearn.datasets.data' + Module where data lives. The default is `'sklearn.datasets.data'`. + + descr_file_name : str, default=None + Name of rst file to be loaded from `descr_module/descr_file_name`. + For example `'wine_data.rst'`. See also :func:`load_descr`. + If not None, also returns the corresponding description of + the dataset. + + descr_module : str or module, default='sklearn.datasets.descr' + Module where `descr_file_name` lives. See also :func:`load_descr`. + The default is `'sklearn.datasets.descr'`. + + Returns + ------- + data : ndarray of shape (n_samples, n_features) + A 2D array with each row representing one sample and each column + representing the features of a given sample. + + target : ndarry of shape (n_samples,) + A 1D array holding target variables for all the samples in `data`. + For example target[0] is the target variable for data[0]. + + target_names : ndarry of shape (n_samples,) + A 1D array containing the names of the classifications. For example + target_names[0] is the name of the target[0] class. + + descr : str, optional + Description of the dataset (the content of `descr_file_name`). + Only returned if `descr_file_name` is not None. + + encoding : str, optional + Text encoding of the CSV file. + + .. versionadded:: 1.4 + """ + data_path = resources.files(data_module) / data_file_name + with data_path.open("r", encoding="utf-8") as csv_file: + data_file = csv.reader(csv_file) + temp = next(data_file) + n_samples = int(temp[0]) + n_features = int(temp[1]) + target_names = np.array(temp[2:]) + data = np.empty((n_samples, n_features)) + target = np.empty((n_samples,), dtype=int) + + for i, ir in enumerate(data_file): + data[i] = np.asarray(ir[:-1], dtype=np.float64) + target[i] = np.asarray(ir[-1], dtype=int) + + if descr_file_name is None: + return data, target, target_names + else: + assert descr_module is not None + descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name) + return data, target, target_names, descr + + +def load_gzip_compressed_csv_data( + data_file_name, + *, + data_module=DATA_MODULE, + descr_file_name=None, + descr_module=DESCR_MODULE, + encoding="utf-8", + **kwargs, +): + """Loads gzip-compressed with `importlib.resources`. + + 1) Open resource file with `importlib.resources.open_binary` + 2) Decompress file obj with `gzip.open` + 3) Load decompressed data with `np.loadtxt` + + Parameters + ---------- + data_file_name : str + Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from + `data_module/data_file_name`. For example `'diabetes_data.csv.gz'`. + + data_module : str or module, default='sklearn.datasets.data' + Module where data lives. The default is `'sklearn.datasets.data'`. + + descr_file_name : str, default=None + Name of rst file to be loaded from `descr_module/descr_file_name`. + For example `'wine_data.rst'`. See also :func:`load_descr`. + If not None, also returns the corresponding description of + the dataset. + + descr_module : str or module, default='sklearn.datasets.descr' + Module where `descr_file_name` lives. See also :func:`load_descr`. + The default is `'sklearn.datasets.descr'`. + + encoding : str, default="utf-8" + Name of the encoding that the gzip-decompressed file will be + decoded with. The default is 'utf-8'. + + **kwargs : dict, optional + Keyword arguments to be passed to `np.loadtxt`; + e.g. delimiter=','. 
+ + Returns + ------- + data : ndarray of shape (n_samples, n_features) + A 2D array with each row representing one sample and each column + representing the features and/or target of a given sample. + + descr : str, optional + Description of the dataset (the content of `descr_file_name`). + Only returned if `descr_file_name` is not None. + """ + data_path = resources.files(data_module) / data_file_name + with data_path.open("rb") as compressed_file: + compressed_file = gzip.open(compressed_file, mode="rt", encoding=encoding) + data = np.loadtxt(compressed_file, **kwargs) + + if descr_file_name is None: + return data + else: + assert descr_module is not None + descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name) + return data, descr + + +def load_descr(descr_file_name, *, descr_module=DESCR_MODULE, encoding="utf-8"): + """Load `descr_file_name` from `descr_module` with `importlib.resources`. + + Parameters + ---------- + descr_file_name : str, default=None + Name of rst file to be loaded from `descr_module/descr_file_name`. + For example `'wine_data.rst'`. See also :func:`load_descr`. + If not None, also returns the corresponding description of + the dataset. + + descr_module : str or module, default='sklearn.datasets.descr' + Module where `descr_file_name` lives. See also :func:`load_descr`. + The default is `'sklearn.datasets.descr'`. + + encoding : str, default="utf-8" + Name of the encoding that `descr_file_name` will be decoded with. + The default is 'utf-8'. + + .. versionadded:: 1.4 + + Returns + ------- + fdescr : str + Content of `descr_file_name`. + """ + path = resources.files(descr_module) / descr_file_name + return path.read_text(encoding=encoding) + + +@validate_params( + { + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def load_wine(*, return_X_y=False, as_frame=False): + """Load and return the wine dataset (classification). + + .. versionadded:: 0.18 + + The wine dataset is a classic and very easy multi-class classification + dataset. + + ================= ============== + Classes 3 + Samples per class [59,71,48] + Samples total 178 + Dimensionality 13 + Features real, positive + ================= ============== + + The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit + standard format from: + https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (178, 13) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, Series} of shape (178,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + target_names: list + The names of target classes. 
+ frame: DataFrame of shape (178, 14) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarrays by default. The first contains a 2D array of shape + (178, 13) with each row representing one sample and each column representing + the features. The second array of shape (178,) contains the target samples. + + Examples + -------- + Let's say you are interested in the samples 10, 80, and 140, and want to + know their class name. + + >>> from sklearn.datasets import load_wine + >>> data = load_wine() + >>> data.target[[10, 80, 140]] + array([0, 1, 2]) + >>> list(data.target_names) + ['class_0', 'class_1', 'class_2'] + """ + + data, target, target_names, fdescr = load_csv_data( + data_file_name="wine_data.csv", descr_file_name="wine_data.rst" + ) + + feature_names = [ + "alcohol", + "malic_acid", + "ash", + "alcalinity_of_ash", + "magnesium", + "total_phenols", + "flavanoids", + "nonflavanoid_phenols", + "proanthocyanins", + "color_intensity", + "hue", + "od280/od315_of_diluted_wines", + "proline", + ] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_wine", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + DESCR=fdescr, + feature_names=feature_names, + ) + + +@validate_params( + {"return_X_y": ["boolean"], "as_frame": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def load_iris(*, return_X_y=False, as_frame=False): + """Load and return the iris dataset (classification). + + The iris dataset is a classic and very easy multi-class classification + dataset. + + ================= ============== + Classes 3 + Samples per class 50 + Samples total 150 + Dimensionality 4 + Features real, positive + ================= ============== + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. See + below for more information about the `data` and `target` object. + + .. versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (150, 4) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, Series} of shape (150,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + target_names: list + The names of target classes. + frame: DataFrame of shape (150, 5) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + filename: str + The path to the location of the data. + + .. versionadded:: 0.20 + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. 
The first containing a 2D array of shape + (n_samples, n_features) with each row representing one sample and + each column representing the features. The second ndarray of shape + (n_samples,) containing the target samples. + + .. versionadded:: 0.18 + + Notes + ----- + .. versionchanged:: 0.20 + Fixed two wrong data points according to Fisher's paper. + The new version is the same as in R, but not as in the UCI + Machine Learning Repository. + + Examples + -------- + Let's say you are interested in the samples 10, 25, and 50, and want to + know their class name. + + >>> from sklearn.datasets import load_iris + >>> data = load_iris() + >>> data.target[[10, 25, 50]] + array([0, 0, 1]) + >>> list(data.target_names) + ['setosa', 'versicolor', 'virginica'] + + See :ref:`sphx_glr_auto_examples_datasets_plot_iris_dataset.py` for a more + detailed example of how to work with the iris dataset. + """ + data_file_name = "iris.csv" + data, target, target_names, fdescr = load_csv_data( + data_file_name=data_file_name, descr_file_name="iris.rst" + ) + + feature_names = [ + "sepal length (cm)", + "sepal width (cm)", + "petal length (cm)", + "petal width (cm)", + ] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_iris", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + DESCR=fdescr, + feature_names=feature_names, + filename=data_file_name, + data_module=DATA_MODULE, + ) + + +@validate_params( + {"return_X_y": ["boolean"], "as_frame": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def load_breast_cancer(*, return_X_y=False, as_frame=False): + """Load and return the breast cancer wisconsin dataset (classification). + + The breast cancer dataset is a classic and very easy binary classification + dataset. + + ================= ============== + Classes 2 + Samples per class 212(M),357(B) + Samples total 569 + Dimensionality 30 + Features real, positive + ================= ============== + + The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is + downloaded from: + https://archive.ics.uci.edu/dataset/17/breast+cancer+wisconsin+diagnostic + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (569, 30) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target : {ndarray, Series} of shape (569,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names : ndarray of shape (30,) + The names of the dataset columns. + target_names : ndarray of shape (2,) + The names of target classes. + frame : DataFrame of shape (569, 31) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. 
versionadded:: 0.23 + DESCR : str + The full description of the dataset. + filename : str + The path to the location of the data. + + .. versionadded:: 0.20 + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarrays by default. The first contains a 2D ndarray of + shape (569, 30) with each row representing one sample and each column + representing the features. The second ndarray of shape (569,) contains + the target samples. If `as_frame=True`, both arrays are pandas objects, + i.e. `X` a dataframe and `y` a series. + + .. versionadded:: 0.18 + + Examples + -------- + Let's say you are interested in the samples 10, 50, and 85, and want to + know their class name. + + >>> from sklearn.datasets import load_breast_cancer + >>> data = load_breast_cancer() + >>> data.target[[10, 50, 85]] + array([0, 1, 0]) + >>> list(data.target_names) + ['malignant', 'benign'] + """ + data_file_name = "breast_cancer.csv" + data, target, target_names, fdescr = load_csv_data( + data_file_name=data_file_name, descr_file_name="breast_cancer.rst" + ) + + feature_names = np.array( + [ + "mean radius", + "mean texture", + "mean perimeter", + "mean area", + "mean smoothness", + "mean compactness", + "mean concavity", + "mean concave points", + "mean symmetry", + "mean fractal dimension", + "radius error", + "texture error", + "perimeter error", + "area error", + "smoothness error", + "compactness error", + "concavity error", + "concave points error", + "symmetry error", + "fractal dimension error", + "worst radius", + "worst texture", + "worst perimeter", + "worst area", + "worst smoothness", + "worst compactness", + "worst concavity", + "worst concave points", + "worst symmetry", + "worst fractal dimension", + ] + ) + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_breast_cancer", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + DESCR=fdescr, + feature_names=feature_names, + filename=data_file_name, + data_module=DATA_MODULE, + ) + + +@validate_params( + { + "n_class": [Interval(Integral, 1, 10, closed="both")], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def load_digits(*, n_class=10, return_X_y=False, as_frame=False): + """Load and return the digits dataset (classification). + + Each datapoint is a 8x8 image of a digit. + + ================= ============== + Classes 10 + Samples per class ~180 + Samples total 1797 + Dimensionality 64 + Features integers 0-16 + ================= ============== + + This is a copy of the test set of the UCI ML hand-written digits datasets + https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_class : int, default=10 + The number of classes to return. Between 0 and 10. + + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. 
versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (1797, 64) + The flattened data matrix. If `as_frame=True`, `data` will be + a pandas DataFrame. + target: {ndarray, Series} of shape (1797,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + target_names: list + The names of target classes. + + .. versionadded:: 0.20 + + frame: DataFrame of shape (1797, 65) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + images: {ndarray} of shape (1797, 8, 8) + The raw image data. + DESCR: str + The full description of the dataset. + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarrays by default. The first contains a 2D ndarray of + shape (1797, 64) with each row representing one sample and each column + representing the features. The second ndarray of shape (1797) contains + the target samples. If `as_frame=True`, both arrays are pandas objects, + i.e. `X` a dataframe and `y` a series. + + .. versionadded:: 0.18 + + Examples + -------- + To load the data and visualize the images:: + + >>> from sklearn.datasets import load_digits + >>> digits = load_digits() + >>> print(digits.data.shape) + (1797, 64) + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.matshow(digits.images[0]) + <...> + >>> plt.show() + """ + + data, fdescr = load_gzip_compressed_csv_data( + data_file_name="digits.csv.gz", descr_file_name="digits.rst", delimiter="," + ) + + target = data[:, -1].astype(int, copy=False) + flat_data = data[:, :-1] + images = flat_data.view() + images.shape = (-1, 8, 8) + + if n_class < 10: + idx = target < n_class + flat_data, target = flat_data[idx], target[idx] + images = images[idx] + + feature_names = [ + "pixel_{}_{}".format(row_idx, col_idx) + for row_idx in range(8) + for col_idx in range(8) + ] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, flat_data, target = _convert_data_dataframe( + "load_digits", flat_data, target, feature_names, target_columns + ) + + if return_X_y: + return flat_data, target + + return Bunch( + data=flat_data, + target=target, + frame=frame, + feature_names=feature_names, + target_names=np.arange(10), + images=images, + DESCR=fdescr, + ) + + +@validate_params( + {"return_X_y": ["boolean"], "as_frame": ["boolean"], "scaled": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True): + """Load and return the diabetes dataset (regression). + + ============== ================== + Samples total 442 + Dimensionality 10 + Features real, -.2 < x < .2 + Targets integer 25 - 346 + ============== ================== + + .. note:: + The meaning of each feature (i.e. `feature_names`) might be unclear + (especially for `ltg`) as the documentation of the original dataset is + not explicit. We provide information that seems correct in regard with + the scientific literature in this field of research. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. 
versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + scaled : bool, default=True + If True, the feature variables are mean centered and scaled by the + standard deviation times the square root of `n_samples`. + If False, raw data is returned for the feature variables. + + .. versionadded:: 1.1 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (442, 10) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, Series} of shape (442,) + The regression target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + frame: DataFrame of shape (442, 11) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + data_filename: str + The path to the location of the data. + target_filename: str + The path to the location of the target. + + (data, target) : tuple if ``return_X_y`` is True + Returns a tuple of two ndarray of shape (n_samples, n_features) + A 2D array with each row representing one sample and each column + representing the features and/or target of a given sample. + + .. versionadded:: 0.18 + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> diabetes = load_diabetes() + >>> diabetes.target[:3] + array([151., 75., 141.]) + >>> diabetes.data.shape + (442, 10) + """ + data_filename = "diabetes_data_raw.csv.gz" + target_filename = "diabetes_target.csv.gz" + data = load_gzip_compressed_csv_data(data_filename) + target = load_gzip_compressed_csv_data(target_filename) + + if scaled: + data = scale(data, copy=False) + data /= data.shape[0] ** 0.5 + + fdescr = load_descr("diabetes.rst") + + feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_diabetes", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + DESCR=fdescr, + feature_names=feature_names, + data_filename=data_filename, + target_filename=target_filename, + data_module=DATA_MODULE, + ) + + +@validate_params( + { + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def load_linnerud(*, return_X_y=False, as_frame=False): + """Load and return the physical exercise Linnerud dataset. + + This dataset is suitable for multi-output regression tasks. + + ============== ============================ + Samples total 20 + Dimensionality 3 (for both data and target) + Features integer + Targets integer + ============== ============================ + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. 
versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string or categorical). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (20, 3) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, dataframe} of shape (20, 3) + The regression targets. If `as_frame=True`, `target` will be + a pandas DataFrame. + feature_names: list + The names of the dataset columns. + target_names: list + The names of the target columns. + frame: DataFrame of shape (20, 6) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + data_filename: str + The path to the location of the data. + target_filename: str + The path to the location of the target. + + .. versionadded:: 0.20 + + (data, target) : tuple if ``return_X_y`` is True + Returns a tuple of two ndarrays or dataframe of shape + `(20, 3)`. Each row represents one sample and each column represents the + features in `X` and a target in `y` of a given sample. + + .. versionadded:: 0.18 + """ + data_filename = "linnerud_exercise.csv" + target_filename = "linnerud_physiological.csv" + + data_module_path = resources.files(DATA_MODULE) + # Read header and data + data_path = data_module_path / data_filename + with data_path.open("r", encoding="utf-8") as f: + header_exercise = f.readline().split() + f.seek(0) # reset file obj + data_exercise = np.loadtxt(f, skiprows=1) + + target_path = data_module_path / target_filename + with target_path.open("r", encoding="utf-8") as f: + header_physiological = f.readline().split() + f.seek(0) # reset file obj + data_physiological = np.loadtxt(f, skiprows=1) + + fdescr = load_descr("linnerud.rst") + + frame = None + if as_frame: + (frame, data_exercise, data_physiological) = _convert_data_dataframe( + "load_linnerud", + data_exercise, + data_physiological, + header_exercise, + header_physiological, + ) + if return_X_y: + return data_exercise, data_physiological + + return Bunch( + data=data_exercise, + feature_names=header_exercise, + target=data_physiological, + target_names=header_physiological, + frame=frame, + DESCR=fdescr, + data_filename=data_filename, + target_filename=target_filename, + data_module=DATA_MODULE, + ) + + +def load_sample_images(): + """Load sample images for image manipulation. + + Loads both, ``china`` and ``flower``. + + Read more in the :ref:`User Guide `. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + images : list of ndarray of shape (427, 640, 3) + The two sample image. + filenames : list + The filenames for the images. + DESCR : str + The full description of the dataset. 
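# --- Editorial aside (illustrative sketch, not part of the patched file) ---
# `load_sample_images` relies on Pillow to decode the two bundled JPEGs into
# uint8 NumPy arrays. A minimal stand-alone version of that decoding step,
# assuming Pillow and NumPy are installed and that "sample.jpg" is a purely
# hypothetical local file, could look like this:

import numpy as np
from PIL import Image

with open("sample.jpg", "rb") as image_file:
    pil_image = Image.open(image_file)
    image = np.asarray(pil_image)  # uint8 array of shape (height, width, 3)
print(image.shape, image.dtype)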
+ + Examples + -------- + To load the data and visualize the images: + + >>> from sklearn.datasets import load_sample_images + >>> dataset = load_sample_images() #doctest: +SKIP + >>> len(dataset.images) #doctest: +SKIP + 2 + >>> first_img_data = dataset.images[0] #doctest: +SKIP + >>> first_img_data.shape #doctest: +SKIP + (427, 640, 3) + >>> first_img_data.dtype #doctest: +SKIP + dtype('uint8') + """ + try: + from PIL import Image + except ImportError: + raise ImportError( + "The Python Imaging Library (PIL) is required to load data " + "from jpeg files. Please refer to " + "https://pillow.readthedocs.io/en/stable/installation.html " + "for installing PIL." + ) + + descr = load_descr("README.txt", descr_module=IMAGES_MODULE) + + filenames, images = [], [] + + jpg_paths = sorted( + resource + for resource in resources.files(IMAGES_MODULE).iterdir() + if resource.is_file() and resource.match("*.jpg") + ) + + for path in jpg_paths: + filenames.append(str(path)) + with path.open("rb") as image_file: + pil_image = Image.open(image_file) + image = np.asarray(pil_image) + images.append(image) + + return Bunch(images=images, filenames=filenames, DESCR=descr) + + +@validate_params( + { + "image_name": [StrOptions({"china.jpg", "flower.jpg"})], + }, + prefer_skip_nested_validation=True, +) +def load_sample_image(image_name): + """Load the numpy array of a single sample image. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + image_name : {`china.jpg`, `flower.jpg`} + The name of the sample image loaded. + + Returns + ------- + img : 3D array + The image as a numpy array: height x width x color. + + Examples + -------- + + >>> from sklearn.datasets import load_sample_image + >>> china = load_sample_image('china.jpg') # doctest: +SKIP + >>> china.dtype # doctest: +SKIP + dtype('uint8') + >>> china.shape # doctest: +SKIP + (427, 640, 3) + >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP + >>> flower.dtype # doctest: +SKIP + dtype('uint8') + >>> flower.shape # doctest: +SKIP + (427, 640, 3) + """ + images = load_sample_images() + index = None + for i, filename in enumerate(images.filenames): + if filename.endswith(image_name): + index = i + break + if index is None: + raise AttributeError("Cannot find sample image: %s" % image_name) + return images.images[index] + + +def _pkl_filepath(*args, **kwargs): + """Return filename for Python 3 pickles + + args[-1] is expected to be the ".pkl" filename. For compatibility with + older scikit-learn versions, a suffix is inserted before the extension. + + _pkl_filepath('/path/to/folder', 'filename.pkl') returns + '/path/to/folder/filename_py3.pkl' + + """ + py3_suffix = kwargs.get("py3_suffix", "_py3") + basename, ext = splitext(args[-1]) + basename += py3_suffix + new_args = args[:-1] + (basename + ext,) + return join(*new_args) + + +def _sha256(path): + """Calculate the sha256 hash of the file at path.""" + sha256hash = hashlib.sha256() + chunk_size = 8192 + with open(path, "rb") as f: + while True: + buffer = f.read(chunk_size) + if not buffer: + break + sha256hash.update(buffer) + return sha256hash.hexdigest() + + +def _fetch_remote(remote, dirname=None): + """Helper function to download a remote dataset into path + + Fetch a dataset pointed by remote's url, save into path using remote's + filename and ensure its integrity based on the SHA256 Checksum of the + downloaded file. 
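# --- Editorial aside (illustrative sketch, not part of the patched file) ---
# `_fetch_remote` verifies every download by streaming the file through
# SHA256, exactly as `_sha256` above does. A self-contained version of that
# integrity check, with purely illustrative names, might look like this:

import hashlib

def sha256_of(path, chunk_size=8192):
    """Return the SHA256 hex digest of a file, reading it in small chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical usage against a RemoteFileMetadata-style record:
# if sha256_of(file_path) != remote.checksum:
#     raise OSError(f"{file_path} may be corrupted (checksum mismatch)")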
+ + Parameters + ---------- + remote : RemoteFileMetadata + Named tuple containing remote dataset meta information: url, filename + and checksum + + dirname : str + Directory to save the file to. + + Returns + ------- + file_path: str + Full path of the created file. + """ + + file_path = remote.filename if dirname is None else join(dirname, remote.filename) + urlretrieve(remote.url, file_path) + checksum = _sha256(file_path) + if remote.checksum != checksum: + raise OSError( + "{} has an SHA256 checksum ({}) " + "differing from expected ({}), " + "file may be corrupted.".format(file_path, checksum, remote.checksum) + ) + return file_path diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py new file mode 100644 index 0000000000000000000000000000000000000000..a8a889fa8ce1de1a84697e64ca999b385e878d50 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py @@ -0,0 +1,223 @@ +"""California housing dataset. + +The original database is available from StatLib + + http://lib.stat.cmu.edu/datasets/ + +The data contains 20,640 observations on 9 variables. + +This dataset contains the average house value as target variable +and the following input variables (features): average income, +housing average age, average rooms, average bedrooms, population, +average occupation, latitude, and longitude in that order. + +References +---------- + +Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions, +Statistics and Probability Letters, 33 (1997) 291-297. + +""" +# Authors: Peter Prettenhofer +# License: BSD 3 clause + +import logging +import tarfile +from os import PathLike, makedirs, remove +from os.path import exists + +import joblib +import numpy as np + +from ..utils import Bunch +from ..utils._param_validation import validate_params +from . import get_data_home +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + _pkl_filepath, + load_descr, +) + +# The original data can be found at: +# https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz +ARCHIVE = RemoteFileMetadata( + filename="cal_housing.tgz", + url="https://ndownloader.figshare.com/files/5976036", + checksum="aaa5c9a6afe2225cc2aed2723682ae403280c4a3695a2ddda4ffb5d8215ea681", +) + +logger = logging.getLogger(__name__) + + +@validate_params( + { + "data_home": [str, PathLike, None], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_california_housing( + *, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False +): + """Load the California housing dataset (regression). + + ============== ============== + Samples total 20640 + Dimensionality 8 + Features real + Target real 0.15 - 5. + ============== ============== + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(data.data, data.target)`` instead of a Bunch + object. + + .. 
versionadded:: 0.20 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string or categorical). The target is + a pandas DataFrame or Series depending on the number of target_columns. + + .. versionadded:: 0.23 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray, shape (20640, 8) + Each row corresponding to the 8 feature values in order. + If ``as_frame`` is True, ``data`` is a pandas object. + target : numpy array of shape (20640,) + Each value corresponds to the average + house value in units of 100,000. + If ``as_frame`` is True, ``target`` is a pandas object. + feature_names : list of length 8 + Array of ordered feature names used in the dataset. + DESCR : str + Description of the California housing dataset. + frame : pandas DataFrame + Only present when `as_frame=True`. DataFrame with ``data`` and + ``target``. + + .. versionadded:: 0.23 + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + + Notes + ----- + + This dataset consists of 20,640 samples and 9 features. + + Examples + -------- + >>> from sklearn.datasets import fetch_california_housing + >>> housing = fetch_california_housing() + >>> print(housing.data.shape, housing.target.shape) + (20640, 8) (20640,) + >>> print(housing.feature_names[0:6]) + ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup'] + """ + data_home = get_data_home(data_home=data_home) + if not exists(data_home): + makedirs(data_home) + + filepath = _pkl_filepath(data_home, "cal_housing.pkz") + if not exists(filepath): + if not download_if_missing: + raise OSError("Data not found and `download_if_missing` is False") + + logger.info( + "Downloading Cal. 
housing from {} to {}".format(ARCHIVE.url, data_home) + ) + + archive_path = _fetch_remote(ARCHIVE, dirname=data_home) + + with tarfile.open(mode="r:gz", name=archive_path) as f: + cal_housing = np.loadtxt( + f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter="," + ) + # Columns are not in the same order compared to the previous + # URL resource on lib.stat.cmu.edu + columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0] + cal_housing = cal_housing[:, columns_index] + + joblib.dump(cal_housing, filepath, compress=6) + remove(archive_path) + + else: + cal_housing = joblib.load(filepath) + + feature_names = [ + "MedInc", + "HouseAge", + "AveRooms", + "AveBedrms", + "Population", + "AveOccup", + "Latitude", + "Longitude", + ] + + target, data = cal_housing[:, 0], cal_housing[:, 1:] + + # avg rooms = total rooms / households + data[:, 2] /= data[:, 5] + + # avg bed rooms = total bed rooms / households + data[:, 3] /= data[:, 5] + + # avg occupancy = population / households + data[:, 5] = data[:, 4] / data[:, 5] + + # target in units of 100,000 + target = target / 100000.0 + + descr = load_descr("california_housing.rst") + + X = data + y = target + + frame = None + target_names = [ + "MedHouseVal", + ] + if as_frame: + frame, X, y = _convert_data_dataframe( + "fetch_california_housing", data, target, feature_names, target_names + ) + + if return_X_y: + return X, y + + return Bunch( + data=X, + target=y, + frame=frame, + target_names=target_names, + feature_names=feature_names, + DESCR=descr, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_covtype.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_covtype.py new file mode 100644 index 0000000000000000000000000000000000000000..4e1b1d7961f2e4b0352e8f2ca189e5d0eae08cd3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_covtype.py @@ -0,0 +1,236 @@ +"""Forest covertype dataset. + +A classic dataset for classification benchmarks, featuring categorical and +real-valued features. + +The dataset page is available from UCI Machine Learning Repository + + https://archive.ics.uci.edu/ml/datasets/Covertype + +Courtesy of Jock A. Blackard and Colorado State University. +""" + +# Author: Lars Buitinck +# Peter Prettenhofer +# License: BSD 3 clause + +import logging +import os +from gzip import GzipFile +from os.path import exists, join +from tempfile import TemporaryDirectory + +import joblib +import numpy as np + +from ..utils import Bunch, check_random_state +from ..utils._param_validation import validate_params +from . 
import get_data_home +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + _pkl_filepath, + load_descr, +) + +# The original data can be found in: +# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz +ARCHIVE = RemoteFileMetadata( + filename="covtype.data.gz", + url="https://ndownloader.figshare.com/files/5976039", + checksum="614360d0257557dd1792834a85a1cdebfadc3c4f30b011d56afee7ffb5b15771", +) + +logger = logging.getLogger(__name__) + +# Column names reference: +# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info +FEATURE_NAMES = [ + "Elevation", + "Aspect", + "Slope", + "Horizontal_Distance_To_Hydrology", + "Vertical_Distance_To_Hydrology", + "Horizontal_Distance_To_Roadways", + "Hillshade_9am", + "Hillshade_Noon", + "Hillshade_3pm", + "Horizontal_Distance_To_Fire_Points", +] +FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)] +FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)] +TARGET_NAMES = ["Cover_Type"] + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + "download_if_missing": ["boolean"], + "random_state": ["random_state"], + "shuffle": ["boolean"], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_covtype( + *, + data_home=None, + download_if_missing=True, + random_state=None, + shuffle=False, + return_X_y=False, + as_frame=False, +): + """Load the covertype dataset (classification). + + Download it if necessary. + + ================= ============ + Classes 7 + Samples total 581012 + Dimensionality 54 + Features int + ================= ============ + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + shuffle : bool, default=False + Whether to shuffle dataset. + + return_X_y : bool, default=False + If True, returns ``(data.data, data.target)`` instead of a Bunch + object. + + .. versionadded:: 0.20 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is a pandas DataFrame or + Series depending on the number of target columns. If `return_X_y` is + True, then (`data`, `target`) will be pandas DataFrames or Series as + described below. + + .. versionadded:: 0.24 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray of shape (581012, 54) + Each row corresponds to the 54 features in the dataset. + target : ndarray of shape (581012,) + Each value corresponds to one of + the 7 forest covertypes with values + ranging between 1 to 7. + frame : dataframe of shape (581012, 55) + Only present when `as_frame=True`. Contains `data` and `target`. + DESCR : str + Description of the forest covertype dataset. + feature_names : list + The names of the dataset columns. + target_names: list + The names of the target columns. 
+ + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + + Examples + -------- + >>> from sklearn.datasets import fetch_covtype + >>> cov_type = fetch_covtype() + >>> cov_type.data.shape + (581012, 54) + >>> cov_type.target.shape + (581012,) + >>> # Let's check the 4 first feature names + >>> cov_type.feature_names[:4] + ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology'] + """ + data_home = get_data_home(data_home=data_home) + covtype_dir = join(data_home, "covertype") + samples_path = _pkl_filepath(covtype_dir, "samples") + targets_path = _pkl_filepath(covtype_dir, "targets") + available = exists(samples_path) and exists(targets_path) + + if download_if_missing and not available: + os.makedirs(covtype_dir, exist_ok=True) + + # Creating temp_dir as a direct subdirectory of the target directory + # guarantees that both reside on the same filesystem, so that we can use + # os.rename to atomically move the data files to their target location. + with TemporaryDirectory(dir=covtype_dir) as temp_dir: + logger.info(f"Downloading {ARCHIVE.url}") + archive_path = _fetch_remote(ARCHIVE, dirname=temp_dir) + Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",") + + X = Xy[:, :-1] + y = Xy[:, -1].astype(np.int32, copy=False) + + samples_tmp_path = _pkl_filepath(temp_dir, "samples") + joblib.dump(X, samples_tmp_path, compress=9) + os.rename(samples_tmp_path, samples_path) + + targets_tmp_path = _pkl_filepath(temp_dir, "targets") + joblib.dump(y, targets_tmp_path, compress=9) + os.rename(targets_tmp_path, targets_path) + + elif not available and not download_if_missing: + raise OSError("Data not found and `download_if_missing` is False") + try: + X, y + except NameError: + X = joblib.load(samples_path) + y = joblib.load(targets_path) + + if shuffle: + ind = np.arange(X.shape[0]) + rng = check_random_state(random_state) + rng.shuffle(ind) + X = X[ind] + y = y[ind] + + fdescr = load_descr("covtype.rst") + + frame = None + if as_frame: + frame, X, y = _convert_data_dataframe( + caller_name="fetch_covtype", + data=X, + target=y, + feature_names=FEATURE_NAMES, + target_names=TARGET_NAMES, + ) + if return_X_y: + return X, y + + return Bunch( + data=X, + target=y, + frame=frame, + target_names=TARGET_NAMES, + feature_names=FEATURE_NAMES, + DESCR=fdescr, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py new file mode 100644 index 0000000000000000000000000000000000000000..444bd01737901f0b2fa791c2f7e80b3762d40dc0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py @@ -0,0 +1,401 @@ +"""KDDCUP 99 dataset. + +A classic dataset for anomaly detection. + +The dataset page is available from UCI Machine Learning Repository + +https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz + +""" + +import errno +import logging +import os +from gzip import GzipFile +from os.path import exists, join + +import joblib +import numpy as np + +from ..utils import Bunch, check_random_state +from ..utils import shuffle as shuffle_method +from ..utils._param_validation import StrOptions, validate_params +from . 
import get_data_home +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + load_descr, +) + +# The original data can be found at: +# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz +ARCHIVE = RemoteFileMetadata( + filename="kddcup99_data", + url="https://ndownloader.figshare.com/files/5976045", + checksum="3b6c942aa0356c0ca35b7b595a26c89d343652c9db428893e7494f837b274292", +) + +# The original data can be found at: +# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz +ARCHIVE_10_PERCENT = RemoteFileMetadata( + filename="kddcup99_10_data", + url="https://ndownloader.figshare.com/files/5976042", + checksum="8045aca0d84e70e622d1148d7df782496f6333bf6eb979a1b0837c42a9fd9561", +) + +logger = logging.getLogger(__name__) + + +@validate_params( + { + "subset": [StrOptions({"SA", "SF", "http", "smtp"}), None], + "data_home": [str, os.PathLike, None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "percent10": ["boolean"], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_kddcup99( + *, + subset=None, + data_home=None, + shuffle=False, + random_state=None, + percent10=True, + download_if_missing=True, + return_X_y=False, + as_frame=False, +): + """Load the kddcup99 dataset (classification). + + Download it if necessary. + + ================= ==================================== + Classes 23 + Samples total 4898431 + Dimensionality 41 + Features discrete (int) or continuous (float) + ================= ==================================== + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + subset : {'SA', 'SF', 'http', 'smtp'}, default=None + To return the corresponding classical subsets of kddcup 99. + If None, return the entire kddcup 99 dataset. + + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + .. versionadded:: 0.19 + + shuffle : bool, default=False + Whether to shuffle dataset. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling and for + selection of abnormal samples if `subset='SA'`. Pass an int for + reproducible output across multiple function calls. + See :term:`Glossary `. + + percent10 : bool, default=True + Whether to load only 10 percent of the data. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. See + below for more information about the `data` and `target` object. + + .. versionadded:: 0.20 + + as_frame : bool, default=False + If `True`, returns a pandas Dataframe for the ``data`` and ``target`` + objects in the `Bunch` returned object; `Bunch` return object will also + have a ``frame`` member. + + .. versionadded:: 0.24 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (494021, 41) + The data matrix to learn. If `as_frame=True`, `data` will be a + pandas DataFrame. + target : {ndarray, series} of shape (494021,) + The regression target for each sample. 
If `as_frame=True`, `target` + will be a pandas Series. + frame : dataframe of shape (494021, 42) + Only present when `as_frame=True`. Contains `data` and `target`. + DESCR : str + The full description of the dataset. + feature_names : list + The names of the dataset columns + target_names: list + The names of the target columns + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + """ + data_home = get_data_home(data_home=data_home) + kddcup99 = _fetch_brute_kddcup99( + data_home=data_home, + percent10=percent10, + download_if_missing=download_if_missing, + ) + + data = kddcup99.data + target = kddcup99.target + feature_names = kddcup99.feature_names + target_names = kddcup99.target_names + + if subset == "SA": + s = target == b"normal." + t = np.logical_not(s) + normal_samples = data[s, :] + normal_targets = target[s] + abnormal_samples = data[t, :] + abnormal_targets = target[t] + + n_samples_abnormal = abnormal_samples.shape[0] + # selected abnormal samples: + random_state = check_random_state(random_state) + r = random_state.randint(0, n_samples_abnormal, 3377) + abnormal_samples = abnormal_samples[r] + abnormal_targets = abnormal_targets[r] + + data = np.r_[normal_samples, abnormal_samples] + target = np.r_[normal_targets, abnormal_targets] + + if subset == "SF" or subset == "http" or subset == "smtp": + # select all samples with positive logged_in attribute: + s = data[:, 11] == 1 + data = np.c_[data[s, :11], data[s, 12:]] + feature_names = feature_names[:11] + feature_names[12:] + target = target[s] + + data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False)) + data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False)) + data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False)) + + if subset == "http": + s = data[:, 2] == b"http" + data = data[s] + target = target[s] + data = np.c_[data[:, 0], data[:, 4], data[:, 5]] + feature_names = [feature_names[0], feature_names[4], feature_names[5]] + + if subset == "smtp": + s = data[:, 2] == b"smtp" + data = data[s] + target = target[s] + data = np.c_[data[:, 0], data[:, 4], data[:, 5]] + feature_names = [feature_names[0], feature_names[4], feature_names[5]] + + if subset == "SF": + data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]] + feature_names = [ + feature_names[0], + feature_names[2], + feature_names[4], + feature_names[5], + ] + + if shuffle: + data, target = shuffle_method(data, target, random_state=random_state) + + fdescr = load_descr("kddcup99.rst") + + frame = None + if as_frame: + frame, data, target = _convert_data_dataframe( + "fetch_kddcup99", data, target, feature_names, target_names + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + feature_names=feature_names, + DESCR=fdescr, + ) + + +def _fetch_brute_kddcup99(data_home=None, download_if_missing=True, percent10=True): + """Load the kddcup99 dataset, downloading it if necessary. + + Parameters + ---------- + data_home : str, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. 
+ + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + percent10 : bool, default=True + Whether to load only 10 percent of the data. + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray of shape (494021, 41) + Each row corresponds to the 41 features in the dataset. + target : ndarray of shape (494021,) + Each value corresponds to one of the 21 attack types or to the + label 'normal.'. + feature_names : list + The names of the dataset columns + target_names: list + The names of the target columns + DESCR : str + Description of the kddcup99 dataset. + + """ + + data_home = get_data_home(data_home=data_home) + dir_suffix = "-py3" + + if percent10: + kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix) + archive = ARCHIVE_10_PERCENT + else: + kddcup_dir = join(data_home, "kddcup99" + dir_suffix) + archive = ARCHIVE + + samples_path = join(kddcup_dir, "samples") + targets_path = join(kddcup_dir, "targets") + available = exists(samples_path) + + dt = [ + ("duration", int), + ("protocol_type", "S4"), + ("service", "S11"), + ("flag", "S6"), + ("src_bytes", int), + ("dst_bytes", int), + ("land", int), + ("wrong_fragment", int), + ("urgent", int), + ("hot", int), + ("num_failed_logins", int), + ("logged_in", int), + ("num_compromised", int), + ("root_shell", int), + ("su_attempted", int), + ("num_root", int), + ("num_file_creations", int), + ("num_shells", int), + ("num_access_files", int), + ("num_outbound_cmds", int), + ("is_host_login", int), + ("is_guest_login", int), + ("count", int), + ("srv_count", int), + ("serror_rate", float), + ("srv_serror_rate", float), + ("rerror_rate", float), + ("srv_rerror_rate", float), + ("same_srv_rate", float), + ("diff_srv_rate", float), + ("srv_diff_host_rate", float), + ("dst_host_count", int), + ("dst_host_srv_count", int), + ("dst_host_same_srv_rate", float), + ("dst_host_diff_srv_rate", float), + ("dst_host_same_src_port_rate", float), + ("dst_host_srv_diff_host_rate", float), + ("dst_host_serror_rate", float), + ("dst_host_srv_serror_rate", float), + ("dst_host_rerror_rate", float), + ("dst_host_srv_rerror_rate", float), + ("labels", "S16"), + ] + + column_names = [c[0] for c in dt] + target_names = column_names[-1] + feature_names = column_names[:-1] + + if available: + try: + X = joblib.load(samples_path) + y = joblib.load(targets_path) + except Exception as e: + raise OSError( + "The cache for fetch_kddcup99 is invalid, please delete " + f"{str(kddcup_dir)} and run the fetch_kddcup99 again" + ) from e + + elif download_if_missing: + _mkdirp(kddcup_dir) + logger.info("Downloading %s" % archive.url) + _fetch_remote(archive, dirname=kddcup_dir) + DT = np.dtype(dt) + logger.debug("extracting archive") + archive_path = join(kddcup_dir, archive.filename) + file_ = GzipFile(filename=archive_path, mode="r") + Xy = [] + for line in file_.readlines(): + line = line.decode() + Xy.append(line.replace("\n", "").split(",")) + file_.close() + logger.debug("extraction done") + os.remove(archive_path) + + Xy = np.asarray(Xy, dtype=object) + for j in range(42): + Xy[:, j] = Xy[:, j].astype(DT[j]) + + X = Xy[:, :-1] + y = Xy[:, -1] + # XXX bug when compress!=0: + # (error: 'Incorrect data length while decompressing[...] 
the file + # could be corrupted.') + + joblib.dump(X, samples_path, compress=0) + joblib.dump(y, targets_path, compress=0) + else: + raise OSError("Data not found and `download_if_missing` is False") + + return Bunch( + data=X, + target=y, + feature_names=feature_names, + target_names=[target_names], + ) + + +def _mkdirp(d): + """Ensure directory d exists (like mkdir -p on Unix) + No guarantee that the directory is writable. + """ + try: + os.makedirs(d) + except OSError as e: + if e.errno != errno.EEXIST: + raise diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_lfw.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_lfw.py new file mode 100644 index 0000000000000000000000000000000000000000..d06d29f21d0a5c79b96fc65eb6c998e6a74a67b3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_lfw.py @@ -0,0 +1,570 @@ +"""Labeled Faces in the Wild (LFW) dataset + +This dataset is a collection of JPEG pictures of famous people collected +over the internet, all details are available on the official website: + + http://vis-www.cs.umass.edu/lfw/ +""" +# Copyright (c) 2011 Olivier Grisel +# License: BSD 3 clause + +import logging +from numbers import Integral, Real +from os import PathLike, listdir, makedirs, remove +from os.path import exists, isdir, join + +import numpy as np +from joblib import Memory + +from ..utils import Bunch +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ._base import ( + RemoteFileMetadata, + _fetch_remote, + get_data_home, + load_descr, +) + +logger = logging.getLogger(__name__) + +# The original data can be found in: +# http://vis-www.cs.umass.edu/lfw/lfw.tgz +ARCHIVE = RemoteFileMetadata( + filename="lfw.tgz", + url="https://ndownloader.figshare.com/files/5976018", + checksum="055f7d9c632d7370e6fb4afc7468d40f970c34a80d4c6f50ffec63f5a8d536c0", +) + +# The original funneled data can be found in: +# http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz +FUNNELED_ARCHIVE = RemoteFileMetadata( + filename="lfw-funneled.tgz", + url="https://ndownloader.figshare.com/files/5976015", + checksum="b47c8422c8cded889dc5a13418c4bc2abbda121092b3533a83306f90d900100a", +) + +# The original target data can be found in: +# http://vis-www.cs.umass.edu/lfw/pairsDevTrain.txt', +# http://vis-www.cs.umass.edu/lfw/pairsDevTest.txt', +# http://vis-www.cs.umass.edu/lfw/pairs.txt', +TARGETS = ( + RemoteFileMetadata( + filename="pairsDevTrain.txt", + url="https://ndownloader.figshare.com/files/5976012", + checksum="1d454dada7dfeca0e7eab6f65dc4e97a6312d44cf142207be28d688be92aabfa", + ), + RemoteFileMetadata( + filename="pairsDevTest.txt", + url="https://ndownloader.figshare.com/files/5976009", + checksum="7cb06600ea8b2814ac26e946201cdb304296262aad67d046a16a7ec85d0ff87c", + ), + RemoteFileMetadata( + filename="pairs.txt", + url="https://ndownloader.figshare.com/files/5976006", + checksum="ea42330c62c92989f9d7c03237ed5d591365e89b3e649747777b70e692dc1592", + ), +) + + +# +# Common private utilities for data fetching from the original LFW website +# local disk caching, and image decoding. 
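# --- Editorial aside (illustrative sketch, not part of the patched file) ---
# The helpers in this section download the LFW archives into
# `<data_home>/lfw_home`, decode the JPEGs with Pillow, and memoize the
# decoded arrays with joblib. A typical call to the public loader defined
# further below, assuming network access and enough disk space for the
# roughly 200MB archive:

from sklearn.datasets import fetch_lfw_people

faces = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
print(faces.data.shape)        # roughly (1288, 1850) with these settings
print(faces.target_names[:3])  # people with at least 70 pictures each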
+# + + +def _check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True): + """Helper function to download any missing LFW data""" + + data_home = get_data_home(data_home=data_home) + lfw_home = join(data_home, "lfw_home") + + if not exists(lfw_home): + makedirs(lfw_home) + + for target in TARGETS: + target_filepath = join(lfw_home, target.filename) + if not exists(target_filepath): + if download_if_missing: + logger.info("Downloading LFW metadata: %s", target.url) + _fetch_remote(target, dirname=lfw_home) + else: + raise OSError("%s is missing" % target_filepath) + + if funneled: + data_folder_path = join(lfw_home, "lfw_funneled") + archive = FUNNELED_ARCHIVE + else: + data_folder_path = join(lfw_home, "lfw") + archive = ARCHIVE + + if not exists(data_folder_path): + archive_path = join(lfw_home, archive.filename) + if not exists(archive_path): + if download_if_missing: + logger.info("Downloading LFW data (~200MB): %s", archive.url) + _fetch_remote(archive, dirname=lfw_home) + else: + raise OSError("%s is missing" % archive_path) + + import tarfile + + logger.debug("Decompressing the data archive to %s", data_folder_path) + tarfile.open(archive_path, "r:gz").extractall(path=lfw_home) + remove(archive_path) + + return lfw_home, data_folder_path + + +def _load_imgs(file_paths, slice_, color, resize): + """Internally used to load images""" + try: + from PIL import Image + except ImportError: + raise ImportError( + "The Python Imaging Library (PIL) is required to load data " + "from jpeg files. Please refer to " + "https://pillow.readthedocs.io/en/stable/installation.html " + "for installing PIL." + ) + + # compute the portion of the images to load to respect the slice_ parameter + # given by the caller + default_slice = (slice(0, 250), slice(0, 250)) + if slice_ is None: + slice_ = default_slice + else: + slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice)) + + h_slice, w_slice = slice_ + h = (h_slice.stop - h_slice.start) // (h_slice.step or 1) + w = (w_slice.stop - w_slice.start) // (w_slice.step or 1) + + if resize is not None: + resize = float(resize) + h = int(resize * h) + w = int(resize * w) + + # allocate some contiguous memory to host the decoded image slices + n_faces = len(file_paths) + if not color: + faces = np.zeros((n_faces, h, w), dtype=np.float32) + else: + faces = np.zeros((n_faces, h, w, 3), dtype=np.float32) + + # iterate over the collected file path to load the jpeg files as numpy + # arrays + for i, file_path in enumerate(file_paths): + if i % 1000 == 0: + logger.debug("Loading face #%05d / %05d", i + 1, n_faces) + + # Checks if jpeg reading worked. Refer to issue #3594 for more + # details. + pil_img = Image.open(file_path) + pil_img = pil_img.crop( + (w_slice.start, h_slice.start, w_slice.stop, h_slice.stop) + ) + if resize is not None: + pil_img = pil_img.resize((w, h)) + face = np.asarray(pil_img, dtype=np.float32) + + if face.ndim == 0: + raise RuntimeError( + "Failed to read the image file %s, " + "Please make sure that libjpeg is installed" % file_path + ) + + face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats + if not color: + # average the color channels to compute a gray levels + # representation + face = face.mean(axis=2) + + faces[i, ...] 
= face + + return faces + + +# +# Task #1: Face Identification on picture with names +# + + +def _fetch_lfw_people( + data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0 +): + """Perform the actual data loading for the lfw people dataset + + This operation is meant to be cached by a joblib wrapper. + """ + # scan the data folder content to retain people with more that + # `min_faces_per_person` face pictures + person_names, file_paths = [], [] + for person_name in sorted(listdir(data_folder_path)): + folder_path = join(data_folder_path, person_name) + if not isdir(folder_path): + continue + paths = [join(folder_path, f) for f in sorted(listdir(folder_path))] + n_pictures = len(paths) + if n_pictures >= min_faces_per_person: + person_name = person_name.replace("_", " ") + person_names.extend([person_name] * n_pictures) + file_paths.extend(paths) + + n_faces = len(file_paths) + if n_faces == 0: + raise ValueError( + "min_faces_per_person=%d is too restrictive" % min_faces_per_person + ) + + target_names = np.unique(person_names) + target = np.searchsorted(target_names, person_names) + + faces = _load_imgs(file_paths, slice_, color, resize) + + # shuffle the faces with a deterministic RNG scheme to avoid having + # all faces of the same person in a row, as it would break some + # cross validation and learning algorithms such as SGD and online + # k-means that make an IID assumption + + indices = np.arange(n_faces) + np.random.RandomState(42).shuffle(indices) + faces, target = faces[indices], target[indices] + return faces, target, target_names + + +@validate_params( + { + "data_home": [str, PathLike, None], + "funneled": ["boolean"], + "resize": [Interval(Real, 0, None, closed="neither"), None], + "min_faces_per_person": [Interval(Integral, 0, None, closed="left"), None], + "color": ["boolean"], + "slice_": [tuple, Hidden(None)], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_lfw_people( + *, + data_home=None, + funneled=True, + resize=0.5, + min_faces_per_person=0, + color=False, + slice_=(slice(70, 195), slice(78, 172)), + download_if_missing=True, + return_X_y=False, +): + """Load the Labeled Faces in the Wild (LFW) people dataset \ +(classification). + + Download it if necessary. + + ================= ======================= + Classes 5749 + Samples total 13233 + Dimensionality 5828 + Features real, between 0 and 255 + ================= ======================= + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + funneled : bool, default=True + Download and use the funneled variant of the dataset. + + resize : float or None, default=0.5 + Ratio used to resize the each face picture. If `None`, no resizing is + performed. + + min_faces_per_person : int, default=None + The extracted dataset will only retain pictures of people that have at + least `min_faces_per_person` different pictures. + + color : bool, default=False + Keep the 3 RGB channels instead of averaging them to a single + gray level channel. If color is True the shape of the data has + one more dimension than the shape with color = False. 
+ + slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) + Provide a custom 2D slice (height, width) to extract the + 'interesting' part of the jpeg files and avoid use statistical + correlation from the background. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch + object. See below for more information about the `dataset.data` and + `dataset.target` object. + + .. versionadded:: 0.20 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : numpy array of shape (13233, 2914) + Each row corresponds to a ravelled face image + of original size 62 x 47 pixels. + Changing the ``slice_`` or resize parameters will change the + shape of the output. + images : numpy array of shape (13233, 62, 47) + Each row is a face image corresponding to one of the 5749 people in + the dataset. Changing the ``slice_`` + or resize parameters will change the shape of the output. + target : numpy array of shape (13233,) + Labels associated to each face image. + Those labels range from 0-5748 and correspond to the person IDs. + target_names : numpy array of shape (5749,) + Names of all persons in the dataset. + Position in array corresponds to the person ID in the target array. + DESCR : str + Description of the Labeled Faces in the Wild (LFW) dataset. + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + """ + lfw_home, data_folder_path = _check_fetch_lfw( + data_home=data_home, funneled=funneled, download_if_missing=download_if_missing + ) + logger.debug("Loading LFW people faces from %s", lfw_home) + + # wrap the loader in a memoizing function that will return memmaped data + # arrays for optimal memory usage + m = Memory(location=lfw_home, compress=6, verbose=0) + load_func = m.cache(_fetch_lfw_people) + + # load and memoize the pairs as np arrays + faces, target, target_names = load_func( + data_folder_path, + resize=resize, + min_faces_per_person=min_faces_per_person, + color=color, + slice_=slice_, + ) + + X = faces.reshape(len(faces), -1) + + fdescr = load_descr("lfw.rst") + + if return_X_y: + return X, target + + # pack the results as a Bunch instance + return Bunch( + data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr + ) + + +# +# Task #2: Face Verification on pairs of face pictures +# + + +def _fetch_lfw_pairs( + index_file_path, data_folder_path, slice_=None, color=False, resize=None +): + """Perform the actual data loading for the LFW pairs dataset + + This operation is meant to be cached by a joblib wrapper. 
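# --- Editorial aside (illustrative sketch, not part of the patched file) ---
# Both LFW loaders wrap these private helpers with `joblib.Memory` so the
# expensive JPEG decoding only happens once per cache directory. The general
# pattern, with an illustrative cache location and a stand-in function:

from joblib import Memory

memory = Memory(location="/tmp/demo_cache", compress=6, verbose=0)

@memory.cache
def expensive_decode(name):
    # Stand-in for the heavy image decoding done by _load_imgs.
    return name.upper()

expensive_decode("lfw")  # computed and written to the cache
expensive_decode("lfw")  # returned from the cache on subsequent calls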
+ """ + # parse the index file to find the number of pairs to be able to allocate + # the right amount of memory before starting to decode the jpeg files + with open(index_file_path, "rb") as index_file: + split_lines = [ln.decode().strip().split("\t") for ln in index_file] + pair_specs = [sl for sl in split_lines if len(sl) > 2] + n_pairs = len(pair_specs) + + # iterating over the metadata lines for each pair to find the filename to + # decode and load in memory + target = np.zeros(n_pairs, dtype=int) + file_paths = list() + for i, components in enumerate(pair_specs): + if len(components) == 3: + target[i] = 1 + pair = ( + (components[0], int(components[1]) - 1), + (components[0], int(components[2]) - 1), + ) + elif len(components) == 4: + target[i] = 0 + pair = ( + (components[0], int(components[1]) - 1), + (components[2], int(components[3]) - 1), + ) + else: + raise ValueError("invalid line %d: %r" % (i + 1, components)) + for j, (name, idx) in enumerate(pair): + try: + person_folder = join(data_folder_path, name) + except TypeError: + person_folder = join(data_folder_path, str(name, "UTF-8")) + filenames = list(sorted(listdir(person_folder))) + file_path = join(person_folder, filenames[idx]) + file_paths.append(file_path) + + pairs = _load_imgs(file_paths, slice_, color, resize) + shape = list(pairs.shape) + n_faces = shape.pop(0) + shape.insert(0, 2) + shape.insert(0, n_faces // 2) + pairs.shape = shape + + return pairs, target, np.array(["Different persons", "Same person"]) + + +@validate_params( + { + "subset": [StrOptions({"train", "test", "10_folds"})], + "data_home": [str, PathLike, None], + "funneled": ["boolean"], + "resize": [Interval(Real, 0, None, closed="neither"), None], + "color": ["boolean"], + "slice_": [tuple, Hidden(None)], + "download_if_missing": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_lfw_pairs( + *, + subset="train", + data_home=None, + funneled=True, + resize=0.5, + color=False, + slice_=(slice(70, 195), slice(78, 172)), + download_if_missing=True, +): + """Load the Labeled Faces in the Wild (LFW) pairs dataset (classification). + + Download it if necessary. + + ================= ======================= + Classes 2 + Samples total 13233 + Dimensionality 5828 + Features real, between 0 and 255 + ================= ======================= + + In the official `README.txt`_ this task is described as the + "Restricted" task. As I am not sure as to implement the + "Unrestricted" variant correctly, I left it as unsupported for now. + + .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt + + The original images are 250 x 250 pixels, but the default slice and resize + arguments reduce them to 62 x 47. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + subset : {'train', 'test', '10_folds'}, default='train' + Select the dataset to load: 'train' for the development training + set, 'test' for the development test set, and '10_folds' for the + official evaluation set that is meant to be used with a 10-folds + cross validation. + + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By + default all scikit-learn data is stored in '~/scikit_learn_data' + subfolders. + + funneled : bool, default=True + Download and use the funneled variant of the dataset. + + resize : float, default=0.5 + Ratio used to resize the each face picture. + + color : bool, default=False + Keep the 3 RGB channels instead of averaging them to a single + gray level channel. 
If color is True the shape of the data has + one more dimension than the shape with color = False. + + slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) + Provide a custom 2D slice (height, width) to extract the + 'interesting' part of the jpeg files and avoid use statistical + correlation from the background. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray of shape (2200, 5828). Shape depends on ``subset``. + Each row corresponds to 2 ravel'd face images + of original size 62 x 47 pixels. + Changing the ``slice_``, ``resize`` or ``subset`` parameters + will change the shape of the output. + pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset`` + Each row has 2 face images corresponding + to same or different person from the dataset + containing 5749 people. Changing the ``slice_``, + ``resize`` or ``subset`` parameters will change the shape of the + output. + target : numpy array of shape (2200,). Shape depends on ``subset``. + Labels associated to each pair of images. + The two label values being different persons or the same person. + target_names : numpy array of shape (2,) + Explains the target values of the target array. + 0 corresponds to "Different person", 1 corresponds to "same person". + DESCR : str + Description of the Labeled Faces in the Wild (LFW) dataset. + """ + lfw_home, data_folder_path = _check_fetch_lfw( + data_home=data_home, funneled=funneled, download_if_missing=download_if_missing + ) + logger.debug("Loading %s LFW pairs from %s", subset, lfw_home) + + # wrap the loader in a memoizing function that will return memmaped data + # arrays for optimal memory usage + m = Memory(location=lfw_home, compress=6, verbose=0) + load_func = m.cache(_fetch_lfw_pairs) + + # select the right metadata file according to the requested subset + label_filenames = { + "train": "pairsDevTrain.txt", + "test": "pairsDevTest.txt", + "10_folds": "pairs.txt", + } + if subset not in label_filenames: + raise ValueError( + "subset='%s' is invalid: should be one of %r" + % (subset, list(sorted(label_filenames.keys()))) + ) + index_file_path = join(lfw_home, label_filenames[subset]) + + # load and memoize the pairs as np arrays + pairs, target, target_names = load_func( + index_file_path, data_folder_path, resize=resize, color=color, slice_=slice_ + ) + + fdescr = load_descr("lfw.rst") + + # pack the results as a Bunch instance + return Bunch( + data=pairs.reshape(len(pairs), -1), + pairs=pairs, + target=target, + target_names=target_names, + DESCR=fdescr, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py new file mode 100644 index 0000000000000000000000000000000000000000..8e1b3c91e254b80bff7b52d7e671ac15ba079264 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py @@ -0,0 +1,156 @@ +"""Modified Olivetti faces dataset. 
+ +The original database was available from (now defunct) + + https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html + +The version retrieved here comes in MATLAB format from the personal +web page of Sam Roweis: + + https://cs.nyu.edu/~roweis/ +""" + +# Copyright (c) 2011 David Warde-Farley +# License: BSD 3 clause + +from os import PathLike, makedirs, remove +from os.path import exists + +import joblib +import numpy as np +from scipy.io import loadmat + +from ..utils import Bunch, check_random_state +from ..utils._param_validation import validate_params +from . import get_data_home +from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr + +# The original data can be found at: +# https://cs.nyu.edu/~roweis/data/olivettifaces.mat +FACES = RemoteFileMetadata( + filename="olivettifaces.mat", + url="https://ndownloader.figshare.com/files/5976027", + checksum="b612fb967f2dc77c9c62d3e1266e0c73d5fca46a4b8906c18e454d41af987794", +) + + +@validate_params( + { + "data_home": [str, PathLike, None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_olivetti_faces( + *, + data_home=None, + shuffle=False, + random_state=0, + download_if_missing=True, + return_X_y=False, +): + """Load the Olivetti faces data-set from AT&T (classification). + + Download it if necessary. + + ================= ===================== + Classes 40 + Samples total 400 + Dimensionality 4096 + Features real, between 0 and 1 + ================= ===================== + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + shuffle : bool, default=False + If True the order of the dataset is shuffled to avoid having + images of the same person grouped. + + random_state : int, RandomState instance or None, default=0 + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns `(data, target)` instead of a `Bunch` object. See + below for more information about the `data` and `target` object. + + .. versionadded:: 0.22 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data: ndarray, shape (400, 4096) + Each row corresponds to a ravelled + face image of original size 64 x 64 pixels. + images : ndarray, shape (400, 64, 64) + Each row is a face image + corresponding to one of the 40 subjects of the dataset. + target : ndarray, shape (400,) + Labels associated to each face image. + Those labels are ranging from 0-39 and correspond to the + Subject IDs. + DESCR : str + Description of the modified Olivetti Faces Dataset. + + (data, target) : tuple if `return_X_y=True` + Tuple with the `data` and `target` objects described above. + + .. 
versionadded:: 0.22 + """ + data_home = get_data_home(data_home=data_home) + if not exists(data_home): + makedirs(data_home) + filepath = _pkl_filepath(data_home, "olivetti.pkz") + if not exists(filepath): + if not download_if_missing: + raise OSError("Data not found and `download_if_missing` is False") + + print("downloading Olivetti faces from %s to %s" % (FACES.url, data_home)) + mat_path = _fetch_remote(FACES, dirname=data_home) + mfile = loadmat(file_name=mat_path) + # delete raw .mat data + remove(mat_path) + + faces = mfile["faces"].T.copy() + joblib.dump(faces, filepath, compress=6) + del mfile + else: + faces = joblib.load(filepath) + + # We want floating point data, but float32 is enough (there is only + # one byte of precision in the original uint8s anyway) + faces = np.float32(faces) + faces = faces - faces.min() + faces /= faces.max() + faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1) + # 10 images per class, 400 images total, each class is contiguous. + target = np.array([i // 10 for i in range(400)]) + if shuffle: + random_state = check_random_state(random_state) + order = random_state.permutation(len(faces)) + faces = faces[order] + target = target[order] + faces_vectorized = faces.reshape(len(faces), -1) + + fdescr = load_descr("olivetti_faces.rst") + + if return_X_y: + return faces_vectorized, target + + return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_openml.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_openml.py new file mode 100644 index 0000000000000000000000000000000000000000..d1745042bfcba7aef3696290973e07d71785d2fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_openml.py @@ -0,0 +1,1158 @@ +import gzip +import hashlib +import json +import os +import shutil +import time +from contextlib import closing +from functools import wraps +from os.path import join +from tempfile import TemporaryDirectory +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from urllib.error import HTTPError, URLError +from urllib.request import Request, urlopen +from warnings import warn + +import numpy as np + +from ..utils import ( + Bunch, + check_pandas_support, # noqa # noqa +) +from ..utils._param_validation import ( + Integral, + Interval, + Real, + StrOptions, + validate_params, +) +from . import get_data_home +from ._arff_parser import load_arff_from_gzip_file + +__all__ = ["fetch_openml"] + +_OPENML_PREFIX = "https://api.openml.org/" +_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2" +_DATA_INFO = "api/v1/json/data/{}" +_DATA_FEATURES = "api/v1/json/data/features/{}" +_DATA_QUALITIES = "api/v1/json/data/qualities/{}" +_DATA_FILE = "data/v1/download/{}" + +OpenmlQualitiesType = List[Dict[str, str]] +OpenmlFeaturesType = List[Dict[str, str]] + + +def _get_local_path(openml_path: str, data_home: str) -> str: + return os.path.join(data_home, "openml.org", openml_path + ".gz") + + +def _retry_with_clean_cache( + openml_path: str, + data_home: Optional[str], + no_retry_exception: Optional[Exception] = None, +) -> Callable: + """If the first call to the decorated function fails, the local cached + file is removed, and the function is called again. If ``data_home`` is + ``None``, then the function is called once. We can provide a specific + exception to not retry on using `no_retry_exception` parameter. 
+ """ + + def decorator(f): + @wraps(f) + def wrapper(*args, **kw): + if data_home is None: + return f(*args, **kw) + try: + return f(*args, **kw) + except URLError: + raise + except Exception as exc: + if no_retry_exception is not None and isinstance( + exc, no_retry_exception + ): + raise + warn("Invalid cache, redownloading file", RuntimeWarning) + local_path = _get_local_path(openml_path, data_home) + if os.path.exists(local_path): + os.unlink(local_path) + return f(*args, **kw) + + return wrapper + + return decorator + + +def _retry_on_network_error( + n_retries: int = 3, delay: float = 1.0, url: str = "" +) -> Callable: + """If the function call results in a network error, call the function again + up to ``n_retries`` times with a ``delay`` between each call. If the error + has a 412 status code, don't call the function again as this is a specific + OpenML error. + The url parameter is used to give more information to the user about the + error. + """ + + def decorator(f): + @wraps(f) + def wrapper(*args, **kwargs): + retry_counter = n_retries + while True: + try: + return f(*args, **kwargs) + except (URLError, TimeoutError) as e: + # 412 is a specific OpenML error code. + if isinstance(e, HTTPError) and e.code == 412: + raise + if retry_counter == 0: + raise + warn( + f"A network error occurred while downloading {url}. Retrying..." + ) + retry_counter -= 1 + time.sleep(delay) + + return wrapper + + return decorator + + +def _open_openml_url( + openml_path: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0 +): + """ + Returns a resource from OpenML.org. Caches it to data_home if required. + + Parameters + ---------- + openml_path : str + OpenML URL that will be accessed. This will be prefixes with + _OPENML_PREFIX. + + data_home : str + Directory to which the files will be cached. If None, no caching will + be applied. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + Returns + ------- + result : stream + A stream to the OpenML resource. + """ + + def is_gzip_encoded(_fsrc): + return _fsrc.info().get("Content-Encoding", "") == "gzip" + + req = Request(_OPENML_PREFIX + openml_path) + req.add_header("Accept-encoding", "gzip") + + if data_home is None: + fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req) + if is_gzip_encoded(fsrc): + return gzip.GzipFile(fileobj=fsrc, mode="rb") + return fsrc + + local_path = _get_local_path(openml_path, data_home) + dir_name, file_name = os.path.split(local_path) + if not os.path.exists(local_path): + os.makedirs(dir_name, exist_ok=True) + try: + # Create a tmpdir as a subfolder of dir_name where the final file will + # be moved to if the download is successful. This guarantees that the + # renaming operation to the final location is atomic to ensure the + # concurrence safety of the dataset caching mechanism. 
+ with TemporaryDirectory(dir=dir_name) as tmpdir: + with closing( + _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)( + req + ) + ) as fsrc: + opener: Callable + if is_gzip_encoded(fsrc): + opener = open + else: + opener = gzip.GzipFile + with opener(os.path.join(tmpdir, file_name), "wb") as fdst: + shutil.copyfileobj(fsrc, fdst) + shutil.move(fdst.name, local_path) + except Exception: + if os.path.exists(local_path): + os.unlink(local_path) + raise + + # XXX: First time, decompression will not be necessary (by using fsrc), but + # it will happen nonetheless + return gzip.GzipFile(local_path, "rb") + + +class OpenMLError(ValueError): + """HTTP 412 is a specific OpenML error code, indicating a generic error""" + + pass + + +def _get_json_content_from_openml_api( + url: str, + error_message: Optional[str], + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> Dict: + """ + Loads json data from the openml api. + + Parameters + ---------- + url : str + The URL to load from. Should be an official OpenML endpoint. + + error_message : str or None + The error message to raise if an acceptable OpenML error is thrown + (acceptable error is, e.g., data id not found. Other errors, like 404's + will throw the native error message). + + data_home : str or None + Location to cache the response. None if no cache is required. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + Returns + ------- + json_data : json + the json result from the OpenML server if the call was successful. + An exception otherwise. + """ + + @_retry_with_clean_cache(url, data_home=data_home) + def _load_json(): + with closing( + _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) + ) as response: + return json.loads(response.read().decode("utf-8")) + + try: + return _load_json() + except HTTPError as error: + # 412 is an OpenML specific error code, indicating a generic error + # (e.g., data not found) + if error.code != 412: + raise error + + # 412 error, not in except for nicer traceback + raise OpenMLError(error_message) + + +def _get_data_info_by_name( + name: str, + version: Union[int, str], + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +): + """ + Utilizes the openml dataset listing api to find a dataset by + name/version + OpenML api function: + https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name + + Parameters + ---------- + name : str + name of the dataset + + version : int or str + If version is an integer, the exact name/version will be obtained from + OpenML. If version is a string (value: "active") it will take the first + version from OpenML that is annotated as active. Any other string + values except "active" are treated as integer. + + data_home : str or None + Location to cache the response. None if no cache is required. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. 
+ + Returns + ------- + first_dataset : json + json representation of the first dataset object that adhired to the + search criteria + + """ + if version == "active": + # situation in which we return the oldest active version + url = _SEARCH_NAME.format(name) + "/status/active/" + error_msg = "No active dataset {} found.".format(name) + json_data = _get_json_content_from_openml_api( + url, + error_msg, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + res = json_data["data"]["dataset"] + if len(res) > 1: + first_version = version = res[0]["version"] + warning_msg = ( + "Multiple active versions of the dataset matching the name" + f" {name} exist. Versions may be fundamentally different, " + f"returning version {first_version}. " + "Available versions:\n" + ) + for r in res: + warning_msg += f"- version {r['version']}, status: {r['status']}\n" + warning_msg += ( + f" url: https://www.openml.org/search?type=data&id={r['did']}\n" + ) + warn(warning_msg) + return res[0] + + # an integer version has been provided + url = (_SEARCH_NAME + "/data_version/{}").format(name, version) + try: + json_data = _get_json_content_from_openml_api( + url, + error_message=None, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + except OpenMLError: + # we can do this in 1 function call if OpenML does not require the + # specification of the dataset status (i.e., return datasets with a + # given name / version regardless of active, deactivated, etc. ) + # TODO: feature request OpenML. + url += "/status/deactivated" + error_msg = "Dataset {} with version {} not found.".format(name, version) + json_data = _get_json_content_from_openml_api( + url, + error_msg, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + + return json_data["data"]["dataset"][0] + + +def _get_data_description_by_id( + data_id: int, + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> Dict[str, Any]: + # OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id + url = _DATA_INFO.format(data_id) + error_message = "Dataset with data_id {} not found.".format(data_id) + json_data = _get_json_content_from_openml_api( + url, + error_message, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + return json_data["data_set_description"] + + +def _get_data_features( + data_id: int, + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> OpenmlFeaturesType: + # OpenML function: + # https://www.openml.org/api_docs#!/data/get_data_features_id + url = _DATA_FEATURES.format(data_id) + error_message = "Dataset with data_id {} not found.".format(data_id) + json_data = _get_json_content_from_openml_api( + url, + error_message, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + return json_data["data_features"]["feature"] + + +def _get_data_qualities( + data_id: int, + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> OpenmlQualitiesType: + # OpenML API function: + # https://www.openml.org/api_docs#!/data/get_data_qualities_id + url = _DATA_QUALITIES.format(data_id) + error_message = "Dataset with data_id {} not found.".format(data_id) + json_data = _get_json_content_from_openml_api( + url, + error_message, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + # the qualities might not be available, but we still try to process + # the data + return json_data.get("data_qualities", {}).get("quality", []) + + +def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int: + """Get the 
number of samples from data qualities. + + Parameters + ---------- + data_qualities : list of dict + Used to retrieve the number of instances (samples) in the dataset. + + Returns + ------- + n_samples : int + The number of samples in the dataset or -1 if data qualities are + unavailable. + """ + # If the data qualities are unavailable, we return -1 + default_n_samples = -1 + + qualities = {d["name"]: d["value"] for d in data_qualities} + return int(float(qualities.get("NumberOfInstances", default_n_samples))) + + +def _load_arff_response( + url: str, + data_home: Optional[str], + parser: str, + output_type: str, + openml_columns_info: dict, + feature_names_to_select: List[str], + target_names_to_select: List[str], + shape: Optional[Tuple[int, int]], + md5_checksum: str, + n_retries: int = 3, + delay: float = 1.0, + read_csv_kwargs: Optional[Dict] = None, +): + """Load the ARFF data associated with the OpenML URL. + + In addition of loading the data, this function will also check the + integrity of the downloaded file from OpenML using MD5 checksum. + + Parameters + ---------- + url : str + The URL of the ARFF file on OpenML. + + data_home : str + The location where to cache the data. + + parser : {"liac-arff", "pandas"} + The parser used to parse the ARFF file. + + output_type : {"numpy", "pandas", "sparse"} + The type of the arrays that will be returned. The possibilities are: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + openml_columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + The list of the features to be selected. + + target_names_to_select : list of str + The list of the target variables to be selected. + + shape : tuple or None + With `parser="liac-arff"`, when using a generator to load the data, + one needs to provide the shape of the data beforehand. + + md5_checksum : str + The MD5 checksum provided by OpenML to check the data integrity. + + n_retries : int, default=3 + The number of times to retry downloading the data if it fails. + + delay : float, default=1.0 + The delay between two consecutive downloads in seconds. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv` when using the pandas parser. + It allows to overwrite the default options. + + .. versionadded:: 1.3 + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) + with closing(gzip_file): + md5 = hashlib.md5() + for chunk in iter(lambda: gzip_file.read(4096), b""): + md5.update(chunk) + actual_md5_checksum = md5.hexdigest() + + if actual_md5_checksum != md5_checksum: + raise ValueError( + f"md5 checksum of local file for {url} does not match description: " + f"expected: {md5_checksum} but got {actual_md5_checksum}. " + "Downloaded file could have been modified / corrupted, clean cache " + "and retry..." 
+ ) + + def _open_url_and_load_gzip_file(url, data_home, n_retries, delay, arff_params): + gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) + with closing(gzip_file): + return load_arff_from_gzip_file(gzip_file, **arff_params) + + arff_params: Dict = dict( + parser=parser, + output_type=output_type, + openml_columns_info=openml_columns_info, + feature_names_to_select=feature_names_to_select, + target_names_to_select=target_names_to_select, + shape=shape, + read_csv_kwargs=read_csv_kwargs or {}, + ) + try: + X, y, frame, categories = _open_url_and_load_gzip_file( + url, data_home, n_retries, delay, arff_params + ) + except Exception as exc: + if parser != "pandas": + raise + + from pandas.errors import ParserError + + if not isinstance(exc, ParserError): + raise + + # A parsing error could come from providing the wrong quotechar + # to pandas. By default, we use a double quote. Thus, we retry + # with a single quote before to raise the error. + arff_params["read_csv_kwargs"].update(quotechar="'") + X, y, frame, categories = _open_url_and_load_gzip_file( + url, data_home, n_retries, delay, arff_params + ) + + return X, y, frame, categories + + +def _download_data_to_bunch( + url: str, + sparse: bool, + data_home: Optional[str], + *, + as_frame: bool, + openml_columns_info: List[dict], + data_columns: List[str], + target_columns: List[str], + shape: Optional[Tuple[int, int]], + md5_checksum: str, + n_retries: int = 3, + delay: float = 1.0, + parser: str, + read_csv_kwargs: Optional[Dict] = None, +): + """Download ARFF data, load it to a specific container and create to Bunch. + + This function has a mechanism to retry/cache/clean the data. + + Parameters + ---------- + url : str + The URL of the ARFF file on OpenML. + + sparse : bool + Whether the dataset is expected to use the sparse ARFF format. + + data_home : str + The location where to cache the data. + + as_frame : bool + Whether or not to return the data into a pandas DataFrame. + + openml_columns_info : list of dict + The information regarding the columns provided by OpenML for the + ARFF dataset. The information is stored as a list of dictionaries. + + data_columns : list of str + The list of the features to be selected. + + target_columns : list of str + The list of the target variables to be selected. + + shape : tuple or None + With `parser="liac-arff"`, when using a generator to load the data, + one needs to provide the shape of the data beforehand. + + md5_checksum : str + The MD5 checksum provided by OpenML to check the data integrity. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + parser : {"liac-arff", "pandas"} + The parser used to parse the ARFF file. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv` when using the pandas parser. + It allows to overwrite the default options. + + .. versionadded:: 1.3 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + X : {ndarray, sparse matrix, dataframe} + The data matrix. + y : {ndarray, dataframe, series} + The target. + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + categories : list of str or None + The names of the features that are categorical. 
`None` if + `output_array_type == "pandas"`. + """ + # Prepare which columns and data types should be returned for the X and y + features_dict = {feature["name"]: feature for feature in openml_columns_info} + + if sparse: + output_type = "sparse" + elif as_frame: + output_type = "pandas" + else: + output_type = "numpy" + + # XXX: target columns should all be categorical or all numeric + _verify_target_data_type(features_dict, target_columns) + for name in target_columns: + column_info = features_dict[name] + n_missing_values = int(column_info["number_of_missing_values"]) + if n_missing_values > 0: + raise ValueError( + f"Target column '{column_info['name']}' has {n_missing_values} missing " + "values. Missing values are not supported for target columns." + ) + + no_retry_exception = None + if parser == "pandas": + # If we get a ParserError with pandas, then we don't want to retry and we raise + # early. + from pandas.errors import ParserError + + no_retry_exception = ParserError + + X, y, frame, categories = _retry_with_clean_cache( + url, data_home, no_retry_exception + )(_load_arff_response)( + url, + data_home, + parser=parser, + output_type=output_type, + openml_columns_info=features_dict, + feature_names_to_select=data_columns, + target_names_to_select=target_columns, + shape=shape, + md5_checksum=md5_checksum, + n_retries=n_retries, + delay=delay, + read_csv_kwargs=read_csv_kwargs, + ) + + return Bunch( + data=X, + target=y, + frame=frame, + categories=categories, + feature_names=data_columns, + target_names=target_columns, + ) + + +def _verify_target_data_type(features_dict, target_columns): + # verifies the data type of the y array in case there are multiple targets + # (throws an error if these targets do not comply with sklearn support) + if not isinstance(target_columns, list): + raise ValueError("target_column should be list, got: %s" % type(target_columns)) + found_types = set() + for target_column in target_columns: + if target_column not in features_dict: + raise KeyError(f"Could not find target_column='{target_column}'") + if features_dict[target_column]["data_type"] == "numeric": + found_types.add(np.float64) + else: + found_types.add(object) + + # note: we compare to a string, not boolean + if features_dict[target_column]["is_ignore"] == "true": + warn(f"target_column='{target_column}' has flag is_ignore.") + if features_dict[target_column]["is_row_identifier"] == "true": + warn(f"target_column='{target_column}' has flag is_row_identifier.") + if len(found_types) > 1: + raise ValueError( + "Can only handle homogeneous multi-target datasets, " + "i.e., all targets are either numeric or " + "categorical." + ) + + +def _valid_data_column_names(features_list, target_columns): + # logic for determining on which columns can be learned. Note that from the + # OpenML guide follows that columns that have the `is_row_identifier` or + # `is_ignore` flag, these can not be learned on. Also target columns are + # excluded. 
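+    # Illustrative sketch (not taken from real OpenML metadata): with
+    #   features_list = [
+    #       {"name": "id", "is_row_identifier": "true", "is_ignore": "false"},
+    #       {"name": "age", "is_row_identifier": "false", "is_ignore": "false"},
+    #       {"name": "class", "is_row_identifier": "false", "is_ignore": "false"},
+    #   ]
+    # and target_columns == ["class"], only "age" is considered a valid data
+    # column: "id" is dropped as a row identifier and "class" as a target.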
+ valid_data_column_names = [] + for feature in features_list: + if ( + feature["name"] not in target_columns + and feature["is_ignore"] != "true" + and feature["is_row_identifier"] != "true" + ): + valid_data_column_names.append(feature["name"]) + return valid_data_column_names + + +@validate_params( + { + "name": [str, None], + "version": [Interval(Integral, 1, None, closed="left"), StrOptions({"active"})], + "data_id": [Interval(Integral, 1, None, closed="left"), None], + "data_home": [str, os.PathLike, None], + "target_column": [str, list, None], + "cache": [bool], + "return_X_y": [bool], + "as_frame": [bool, StrOptions({"auto"})], + "n_retries": [Interval(Integral, 1, None, closed="left")], + "delay": [Interval(Real, 0, None, closed="right")], + "parser": [ + StrOptions({"auto", "pandas", "liac-arff"}), + ], + "read_csv_kwargs": [dict, None], + }, + prefer_skip_nested_validation=True, +) +def fetch_openml( + name: Optional[str] = None, + *, + version: Union[str, int] = "active", + data_id: Optional[int] = None, + data_home: Optional[Union[str, os.PathLike]] = None, + target_column: Optional[Union[str, List]] = "default-target", + cache: bool = True, + return_X_y: bool = False, + as_frame: Union[str, bool] = "auto", + n_retries: int = 3, + delay: float = 1.0, + parser: str = "auto", + read_csv_kwargs: Optional[Dict] = None, +): + """Fetch dataset from openml by name or dataset id. + + Datasets are uniquely identified by either an integer ID or by a + combination of name and version (i.e. there might be multiple + versions of the 'iris' dataset). Please give either name or data_id + (not both). In case a name is given, a version can also be + provided. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + .. note:: EXPERIMENTAL + + The API is experimental (particularly the return value structure), + and might have small backward-incompatible changes without notice + or warning in future releases. + + Parameters + ---------- + name : str, default=None + String identifier of the dataset. Note that OpenML can have multiple + datasets with the same name. + + version : int or 'active', default='active' + Version of the dataset. Can only be provided if also ``name`` is given. + If 'active' the oldest version that's still active is used. Since + there may be more than one active version of a dataset, and those + versions may fundamentally be different from one another, setting an + exact version is highly recommended. + + data_id : int, default=None + OpenML ID of the dataset. The most specific way of retrieving a + dataset. If data_id is not given, name (and potential version) are + used to obtain a dataset. + + data_home : str or path-like, default=None + Specify another download and cache folder for the data sets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + target_column : str, list or None, default='default-target' + Specify the column name in the data to use as target. If + 'default-target', the standard target column a stored on the server + is used. If ``None``, all columns are returned as data and the + target is ``None``. If list (of strings), all columns with these names + are returned as multi-target (Note: not all scikit-learn classifiers + can handle all types of multi-output combinations). + + cache : bool, default=True + Whether to cache the downloaded datasets into `data_home`. + + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. 
See + below for more information about the `data` and `target` objects. + + as_frame : bool or 'auto', default='auto' + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string or categorical). The target is + a pandas DataFrame or Series depending on the number of target_columns. + The Bunch will contain a ``frame`` attribute with the target and the + data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas + DataFrames or Series as describe above. + + If `as_frame` is 'auto', the data and target will be converted to + DataFrame or Series as if `as_frame` is set to True, unless the dataset + is stored in sparse format. + + If `as_frame` is False, the data and target will be NumPy arrays and + the `data` will only contain numerical values when `parser="liac-arff"` + where the categories are provided in the attribute `categories` of the + `Bunch` instance. When `parser="pandas"`, no ordinal encoding is made. + + .. versionchanged:: 0.24 + The default value of `as_frame` changed from `False` to `'auto'` + in 0.24. + + n_retries : int, default=3 + Number of retries when HTTP errors or network timeouts are encountered. + Error with status code 412 won't be retried as they represent OpenML + generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + parser : {"auto", "pandas", "liac-arff"}, default="auto" + Parser used to load the ARFF file. Two parsers are implemented: + + - `"pandas"`: this is the most efficient parser. However, it requires + pandas to be installed and can only open dense datasets. + - `"liac-arff"`: this is a pure Python ARFF parser that is much less + memory- and CPU-efficient. It deals with sparse ARFF datasets. + + If `"auto"`, the parser is chosen automatically such that `"liac-arff"` + is selected for sparse ARFF datasets, otherwise `"pandas"` is selected. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `parser` changes from `"liac-arff"` to + `"auto"`. + + read_csv_kwargs : dict, default=None + Keyword arguments passed to :func:`pandas.read_csv` when loading the data + from a ARFF file and using the pandas parser. It can allow to + overwrite some default parameters. + + .. versionadded:: 1.3 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame + The feature matrix. Categorical features are encoded as ordinals. + target : np.array, pandas Series or DataFrame + The regression target or classification labels, if applicable. + Dtype is float if numeric, and object if categorical. If + ``as_frame`` is True, ``target`` is a pandas object. + DESCR : str + The full description of the dataset. + feature_names : list + The names of the dataset columns. + target_names: list + The names of the target columns. + + .. versionadded:: 0.22 + + categories : dict or None + Maps each categorical feature name to a list of values, such + that the value encoded as i is ith in the list. If ``as_frame`` + is True, this is None. + details : dict + More metadata from OpenML. + frame : pandas DataFrame + Only present when `as_frame=True`. DataFrame with ``data`` and + ``target``. + + (data, target) : tuple if ``return_X_y`` is True + + .. note:: EXPERIMENTAL + + This interface is **experimental** and subsequent releases may + change attributes without notice (although there should only be + minor changes to ``data`` and ``target``). 
+ + Missing values in the 'data' are represented as NaN's. Missing values + in 'target' are represented as NaN's (numerical target) or None + (categorical target). + + Notes + ----- + The `"pandas"` and `"liac-arff"` parsers can lead to different data types + in the output. The notable differences are the following: + + - The `"liac-arff"` parser always encodes categorical features as `str` objects. + To the contrary, the `"pandas"` parser instead infers the type while + reading and numerical categories will be casted into integers whenever + possible. + - The `"liac-arff"` parser uses float64 to encode numerical features + tagged as 'REAL' and 'NUMERICAL' in the metadata. The `"pandas"` + parser instead infers if these numerical features corresponds + to integers and uses panda's Integer extension dtype. + - In particular, classification datasets with integer categories are + typically loaded as such `(0, 1, ...)` with the `"pandas"` parser while + `"liac-arff"` will force the use of string encoded class labels such as + `"0"`, `"1"` and so on. + - The `"pandas"` parser will not strip single quotes - i.e. `'` - from + string columns. For instance, a string `'my string'` will be kept as is + while the `"liac-arff"` parser will strip the single quotes. For + categorical columns, the single quotes are stripped from the values. + + In addition, when `as_frame=False` is used, the `"liac-arff"` parser + returns ordinally encoded data where the categories are provided in the + attribute `categories` of the `Bunch` instance. Instead, `"pandas"` returns + a NumPy array were the categories are not encoded. + + Examples + -------- + >>> from sklearn.datasets import fetch_openml + >>> adult = fetch_openml("adult", version=2) # doctest: +SKIP + >>> adult.frame.info() # doctest: +SKIP + + RangeIndex: 48842 entries, 0 to 48841 + Data columns (total 15 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 age 48842 non-null int64 + 1 workclass 46043 non-null category + 2 fnlwgt 48842 non-null int64 + 3 education 48842 non-null category + 4 education-num 48842 non-null int64 + 5 marital-status 48842 non-null category + 6 occupation 46033 non-null category + 7 relationship 48842 non-null category + 8 race 48842 non-null category + 9 sex 48842 non-null category + 10 capital-gain 48842 non-null int64 + 11 capital-loss 48842 non-null int64 + 12 hours-per-week 48842 non-null int64 + 13 native-country 47985 non-null category + 14 class 48842 non-null category + dtypes: category(9), int64(6) + memory usage: 2.7 MB + """ + if cache is False: + # no caching will be applied + data_home = None + else: + data_home = get_data_home(data_home=data_home) + data_home = join(str(data_home), "openml") + + # check valid function arguments. 
data_id XOR (name, version) should be + # provided + if name is not None: + # OpenML is case-insensitive, but the caching mechanism is not + # convert all data names (str) to lower case + name = name.lower() + if data_id is not None: + raise ValueError( + "Dataset data_id={} and name={} passed, but you can only " + "specify a numeric data_id or a name, not " + "both.".format(data_id, name) + ) + data_info = _get_data_info_by_name( + name, version, data_home, n_retries=n_retries, delay=delay + ) + data_id = data_info["did"] + elif data_id is not None: + # from the previous if statement, it is given that name is None + if version != "active": + raise ValueError( + "Dataset data_id={} and version={} passed, but you can only " + "specify a numeric data_id or a version, not " + "both.".format(data_id, version) + ) + else: + raise ValueError( + "Neither name nor data_id are provided. Please provide name or data_id." + ) + + data_description = _get_data_description_by_id(data_id, data_home) + if data_description["status"] != "active": + warn( + "Version {} of dataset {} is inactive, meaning that issues have " + "been found in the dataset. Try using a newer version from " + "this URL: {}".format( + data_description["version"], + data_description["name"], + data_description["url"], + ) + ) + if "error" in data_description: + warn( + "OpenML registered a problem with the dataset. It might be " + "unusable. Error: {}".format(data_description["error"]) + ) + if "warning" in data_description: + warn( + "OpenML raised a warning on the dataset. It might be " + "unusable. Warning: {}".format(data_description["warning"]) + ) + + return_sparse = data_description["format"].lower() == "sparse_arff" + as_frame = not return_sparse if as_frame == "auto" else as_frame + if parser == "auto": + parser_ = "liac-arff" if return_sparse else "pandas" + else: + parser_ = parser + + if parser_ == "pandas": + try: + check_pandas_support("`fetch_openml`") + except ImportError as exc: + if as_frame: + err_msg = ( + "Returning pandas objects requires pandas to be installed. " + "Alternatively, explicitly set `as_frame=False` and " + "`parser='liac-arff'`." + ) + else: + err_msg = ( + f"Using `parser={parser!r}` wit dense data requires pandas to be " + "installed. Alternatively, explicitly set `parser='liac-arff'`." + ) + raise ImportError(err_msg) from exc + + if return_sparse: + if as_frame: + raise ValueError( + "Sparse ARFF datasets cannot be loaded with as_frame=True. " + "Use as_frame=False or as_frame='auto' instead." + ) + if parser_ == "pandas": + raise ValueError( + f"Sparse ARFF datasets cannot be loaded with parser={parser!r}. " + "Use parser='liac-arff' or parser='auto' instead." + ) + + # download data features, meta-info about column types + features_list = _get_data_features(data_id, data_home) + + if not as_frame: + for feature in features_list: + if "true" in (feature["is_ignore"], feature["is_row_identifier"]): + continue + if feature["data_type"] == "string": + raise ValueError( + "STRING attributes are not supported for " + "array representation. 
Try as_frame=True" + ) + + if target_column == "default-target": + # determines the default target based on the data feature results + # (which is currently more reliable than the data description; + # see issue: https://github.com/openml/OpenML/issues/768) + target_columns = [ + feature["name"] + for feature in features_list + if feature["is_target"] == "true" + ] + elif isinstance(target_column, str): + # for code-simplicity, make target_column by default a list + target_columns = [target_column] + elif target_column is None: + target_columns = [] + else: + # target_column already is of type list + target_columns = target_column + data_columns = _valid_data_column_names(features_list, target_columns) + + shape: Optional[Tuple[int, int]] + # determine arff encoding to return + if not return_sparse: + # The shape must include the ignored features to keep the right indexes + # during the arff data conversion. + data_qualities = _get_data_qualities(data_id, data_home) + shape = _get_num_samples(data_qualities), len(features_list) + else: + shape = None + + # obtain the data + url = _DATA_FILE.format(data_description["file_id"]) + bunch = _download_data_to_bunch( + url, + return_sparse, + data_home, + as_frame=bool(as_frame), + openml_columns_info=features_list, + shape=shape, + target_columns=target_columns, + data_columns=data_columns, + md5_checksum=data_description["md5_checksum"], + n_retries=n_retries, + delay=delay, + parser=parser_, + read_csv_kwargs=read_csv_kwargs, + ) + + if return_X_y: + return bunch.data, bunch.target + + description = "{}\n\nDownloaded from openml.org.".format( + data_description.pop("description") + ) + + bunch.update( + DESCR=description, + details=data_description, + url="https://www.openml.org/d/{}".format(data_id), + ) + + return bunch diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py new file mode 100644 index 0000000000000000000000000000000000000000..d9f392d872216d7b420d8ddf34ffdd71db6527e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py @@ -0,0 +1,306 @@ +"""RCV1 dataset. + +The dataset page is available at + + http://jmlr.csail.mit.edu/papers/volume5/lewis04a/ +""" + +# Author: Tom Dupre la Tour +# License: BSD 3 clause + +import logging +from gzip import GzipFile +from os import PathLike, makedirs, remove +from os.path import exists, join + +import joblib +import numpy as np +import scipy.sparse as sp + +from ..utils import Bunch +from ..utils import shuffle as shuffle_ +from ..utils._param_validation import StrOptions, validate_params +from . 
import get_data_home +from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr +from ._svmlight_format_io import load_svmlight_files + +# The original vectorized data can be found at: +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt0.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt1.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt2.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt3.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_train.dat.gz +# while the original stemmed token files can be found +# in the README, section B.12.i.: +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm +XY_METADATA = ( + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976069", + checksum="ed40f7e418d10484091b059703eeb95ae3199fe042891dcec4be6696b9968374", + filename="lyrl2004_vectors_test_pt0.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976066", + checksum="87700668ae45d45d5ca1ef6ae9bd81ab0f5ec88cc95dcef9ae7838f727a13aa6", + filename="lyrl2004_vectors_test_pt1.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976063", + checksum="48143ac703cbe33299f7ae9f4995db49a258690f60e5debbff8995c34841c7f5", + filename="lyrl2004_vectors_test_pt2.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976060", + checksum="dfcb0d658311481523c6e6ca0c3f5a3e1d3d12cde5d7a8ce629a9006ec7dbb39", + filename="lyrl2004_vectors_test_pt3.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976057", + checksum="5468f656d0ba7a83afc7ad44841cf9a53048a5c083eedc005dcdb5cc768924ae", + filename="lyrl2004_vectors_train.dat.gz", + ), +) + +# The original data can be found at: +# http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a08-topic-qrels/rcv1-v2.topics.qrels.gz +TOPICS_METADATA = RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976048", + checksum="2a98e5e5d8b770bded93afc8930d88299474317fe14181aee1466cc754d0d1c1", + filename="rcv1v2.topics.qrels.gz", +) + +logger = logging.getLogger(__name__) + + +@validate_params( + { + "data_home": [str, PathLike, None], + "subset": [StrOptions({"train", "test", "all"})], + "download_if_missing": ["boolean"], + "random_state": ["random_state"], + "shuffle": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_rcv1( + *, + data_home=None, + subset="all", + download_if_missing=True, + random_state=None, + shuffle=False, + return_X_y=False, +): + """Load the RCV1 multilabel dataset (classification). + + Download it if necessary. + + Version: RCV1-v2, vectors, full sets, topics multilabels. + + ================= ===================== + Classes 103 + Samples total 804414 + Dimensionality 47236 + Features real, between 0 and 1 + ================= ===================== + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.17 + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. 
+ + subset : {'train', 'test', 'all'}, default='all' + Select the dataset to load: 'train' for the training set + (23149 samples), 'test' for the test set (781265 samples), + 'all' for both, with the training samples first if shuffle is False. + This follows the official LYRL2004 chronological split. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + shuffle : bool, default=False + Whether to shuffle dataset. + + return_X_y : bool, default=False + If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch + object. See below for more information about the `dataset.data` and + `dataset.target` object. + + .. versionadded:: 0.20 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object. Returned only if `return_X_y` is False. + `dataset` has the following attributes: + + - data : sparse matrix of shape (804414, 47236), dtype=np.float64 + The array has 0.16% of non zero values. Will be of CSR format. + - target : sparse matrix of shape (804414, 103), dtype=np.uint8 + Each sample has a value of 1 in its categories, and 0 in others. + The array has 3.15% of non zero values. Will be of CSR format. + - sample_id : ndarray of shape (804414,), dtype=np.uint32, + Identification number of each sample, as ordered in dataset.data. + - target_names : ndarray of shape (103,), dtype=object + Names of each target (RCV1 topics), as ordered in dataset.target. + - DESCR : str + Description of the RCV1 dataset. + + (data, target) : tuple + A tuple consisting of `dataset.data` and `dataset.target`, as + described above. Returned only if `return_X_y` is True. + + .. 
versionadded:: 0.20 + """ + N_SAMPLES = 804414 + N_FEATURES = 47236 + N_CATEGORIES = 103 + N_TRAIN = 23149 + + data_home = get_data_home(data_home=data_home) + rcv1_dir = join(data_home, "RCV1") + if download_if_missing: + if not exists(rcv1_dir): + makedirs(rcv1_dir) + + samples_path = _pkl_filepath(rcv1_dir, "samples.pkl") + sample_id_path = _pkl_filepath(rcv1_dir, "sample_id.pkl") + sample_topics_path = _pkl_filepath(rcv1_dir, "sample_topics.pkl") + topics_path = _pkl_filepath(rcv1_dir, "topics_names.pkl") + + # load data (X) and sample_id + if download_if_missing and (not exists(samples_path) or not exists(sample_id_path)): + files = [] + for each in XY_METADATA: + logger.info("Downloading %s" % each.url) + file_path = _fetch_remote(each, dirname=rcv1_dir) + files.append(GzipFile(filename=file_path)) + + Xy = load_svmlight_files(files, n_features=N_FEATURES) + + # Training data is before testing data + X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr() + sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7])) + sample_id = sample_id.astype(np.uint32, copy=False) + + joblib.dump(X, samples_path, compress=9) + joblib.dump(sample_id, sample_id_path, compress=9) + + # delete archives + for f in files: + f.close() + remove(f.name) + else: + X = joblib.load(samples_path) + sample_id = joblib.load(sample_id_path) + + # load target (y), categories, and sample_id_bis + if download_if_missing and ( + not exists(sample_topics_path) or not exists(topics_path) + ): + logger.info("Downloading %s" % TOPICS_METADATA.url) + topics_archive_path = _fetch_remote(TOPICS_METADATA, dirname=rcv1_dir) + + # parse the target file + n_cat = -1 + n_doc = -1 + doc_previous = -1 + y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8) + sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32) + category_names = {} + with GzipFile(filename=topics_archive_path, mode="rb") as f: + for line in f: + line_components = line.decode("ascii").split(" ") + if len(line_components) == 3: + cat, doc, _ = line_components + if cat not in category_names: + n_cat += 1 + category_names[cat] = n_cat + + doc = int(doc) + if doc != doc_previous: + doc_previous = doc + n_doc += 1 + sample_id_bis[n_doc] = doc + y[n_doc, category_names[cat]] = 1 + + # delete archive + remove(topics_archive_path) + + # Samples in X are ordered with sample_id, + # whereas in y, they are ordered with sample_id_bis. + permutation = _find_permutation(sample_id_bis, sample_id) + y = y[permutation, :] + + # save category names in a list, with same order than y + categories = np.empty(N_CATEGORIES, dtype=object) + for k in category_names.keys(): + categories[category_names[k]] = k + + # reorder categories in lexicographic order + order = np.argsort(categories) + categories = categories[order] + y = sp.csr_matrix(y[:, order]) + + joblib.dump(y, sample_topics_path, compress=9) + joblib.dump(categories, topics_path, compress=9) + else: + y = joblib.load(sample_topics_path) + categories = joblib.load(topics_path) + + if subset == "all": + pass + elif subset == "train": + X = X[:N_TRAIN, :] + y = y[:N_TRAIN, :] + sample_id = sample_id[:N_TRAIN] + elif subset == "test": + X = X[N_TRAIN:, :] + y = y[N_TRAIN:, :] + sample_id = sample_id[N_TRAIN:] + else: + raise ValueError( + "Unknown subset parameter. 
Got '%s' instead of one" + " of ('all', 'train', test')" % subset + ) + + if shuffle: + X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state) + + fdescr = load_descr("rcv1.rst") + + if return_X_y: + return X, y + + return Bunch( + data=X, target=y, sample_id=sample_id, target_names=categories, DESCR=fdescr + ) + + +def _inverse_permutation(p): + """Inverse permutation p.""" + n = p.size + s = np.zeros(n, dtype=np.int32) + i = np.arange(n, dtype=np.int32) + np.put(s, p, i) # s[p] = i + return s + + +def _find_permutation(a, b): + """Find the permutation from a to b.""" + t = np.argsort(a) + u = np.argsort(b) + u_ = _inverse_permutation(u) + return t[u_] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..1d1e65ff9966ef0f71521314f59772ca0dfd2283 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py @@ -0,0 +1,2284 @@ +""" +Generate samples of synthetic data sets. +""" + +# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, +# G. Louppe, J. Nothman +# License: BSD 3 clause + +import array +import numbers +import warnings +from collections.abc import Iterable +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy import linalg + +from ..preprocessing import MultiLabelBinarizer +from ..utils import check_array, check_random_state +from ..utils import shuffle as util_shuffle +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.random import sample_without_replacement + + +def _generate_hypercube(samples, dimensions, rng): + """Returns distinct binary samples of length dimensions.""" + if dimensions > 30: + return np.hstack( + [ + rng.randint(2, size=(samples, dimensions - 30)), + _generate_hypercube(samples, 30, rng), + ] + ) + out = sample_without_replacement(2**dimensions, samples, random_state=rng).astype( + dtype=">u4", copy=False + ) + out = np.unpackbits(out.view(">u1")).reshape((-1, 32))[:, -dimensions:] + return out + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_informative": [Interval(Integral, 1, None, closed="left")], + "n_redundant": [Interval(Integral, 0, None, closed="left")], + "n_repeated": [Interval(Integral, 0, None, closed="left")], + "n_classes": [Interval(Integral, 1, None, closed="left")], + "n_clusters_per_class": [Interval(Integral, 1, None, closed="left")], + "weights": ["array-like", None], + "flip_y": [Interval(Real, 0, 1, closed="both")], + "class_sep": [Interval(Real, 0, None, closed="neither")], + "hypercube": ["boolean"], + "shift": [Interval(Real, None, None, closed="neither"), "array-like", None], + "scale": [Interval(Real, 0, None, closed="neither"), "array-like", None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_classification( + n_samples=100, + n_features=20, + *, + n_informative=2, + n_redundant=2, + n_repeated=0, + n_classes=2, + n_clusters_per_class=2, + weights=None, + flip_y=0.01, + class_sep=1.0, + hypercube=True, + shift=0.0, + scale=1.0, + shuffle=True, + random_state=None, +): + """Generate a random n-class classification problem. 
+ + This initially creates clusters of points normally distributed (std=1) + about vertices of an ``n_informative``-dimensional hypercube with sides of + length ``2*class_sep`` and assigns an equal number of clusters to each + class. It introduces interdependence between these features and adds + various types of further noise to the data. + + Without shuffling, ``X`` horizontally stacks features in the following + order: the primary ``n_informative`` features, followed by ``n_redundant`` + linear combinations of the informative features, followed by ``n_repeated`` + duplicates, drawn randomly with replacement from the informative and + redundant features. The remaining features are filled with random noise. + Thus, without shuffling, all useful features are contained in the columns + ``X[:, :n_informative + n_redundant + n_repeated]``. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=20 + The total number of features. These comprise ``n_informative`` + informative features, ``n_redundant`` redundant features, + ``n_repeated`` duplicated features and + ``n_features-n_informative-n_redundant-n_repeated`` useless features + drawn at random. + + n_informative : int, default=2 + The number of informative features. Each class is composed of a number + of gaussian clusters each located around the vertices of a hypercube + in a subspace of dimension ``n_informative``. For each cluster, + informative features are drawn independently from N(0, 1) and then + randomly linearly combined within each cluster in order to add + covariance. The clusters are then placed on the vertices of the + hypercube. + + n_redundant : int, default=2 + The number of redundant features. These features are generated as + random linear combinations of the informative features. + + n_repeated : int, default=0 + The number of duplicated features, drawn randomly from the informative + and the redundant features. + + n_classes : int, default=2 + The number of classes (or labels) of the classification problem. + + n_clusters_per_class : int, default=2 + The number of clusters per class. + + weights : array-like of shape (n_classes,) or (n_classes - 1,),\ + default=None + The proportions of samples assigned to each class. If None, then + classes are balanced. Note that if ``len(weights) == n_classes - 1``, + then the last class weight is automatically inferred. + More than ``n_samples`` samples may be returned if the sum of + ``weights`` exceeds 1. Note that the actual class proportions will + not exactly match ``weights`` when ``flip_y`` isn't 0. + + flip_y : float, default=0.01 + The fraction of samples whose class is assigned randomly. Larger + values introduce noise in the labels and make the classification + task harder. Note that the default setting flip_y > 0 might lead + to less than ``n_classes`` in y in some cases. + + class_sep : float, default=1.0 + The factor multiplying the hypercube size. Larger values spread + out the clusters/classes and make the classification task easier. + + hypercube : bool, default=True + If True, the clusters are put on the vertices of a hypercube. If + False, the clusters are put on the vertices of a random polytope. + + shift : float, ndarray of shape (n_features,) or None, default=0.0 + Shift features by the specified value. 
If None, then features + are shifted by a random value drawn in [-class_sep, class_sep]. + + scale : float, ndarray of shape (n_features,) or None, default=1.0 + Multiply features by the specified value. If None, then features + are scaled by a random value drawn in [1, 100]. Note that scaling + happens after shifting. + + shuffle : bool, default=True + Shuffle the samples and the features. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels for class membership of each sample. + + See Also + -------- + make_blobs : Simplified variant. + make_multilabel_classification : Unrelated generator for multilabel tasks. + + Notes + ----- + The algorithm is adapted from Guyon [1] and was designed to generate + the "Madelon" dataset. + + References + ---------- + .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable + selection benchmark", 2003. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(random_state=42) + >>> X.shape + (100, 20) + >>> y.shape + (100,) + >>> list(y[:5]) + [0, 0, 1, 1, 0] + """ + generator = check_random_state(random_state) + + # Count features, clusters and samples + if n_informative + n_redundant + n_repeated > n_features: + raise ValueError( + "Number of informative, redundant and repeated " + "features must sum to less than the number of total" + " features" + ) + # Use log2 to avoid overflow errors + if n_informative < np.log2(n_classes * n_clusters_per_class): + msg = "n_classes({}) * n_clusters_per_class({}) must be" + msg += " smaller or equal 2**n_informative({})={}" + raise ValueError( + msg.format( + n_classes, n_clusters_per_class, n_informative, 2**n_informative + ) + ) + + if weights is not None: + if len(weights) not in [n_classes, n_classes - 1]: + raise ValueError( + "Weights specified but incompatible with number of classes." 
+ ) + if len(weights) == n_classes - 1: + if isinstance(weights, list): + weights = weights + [1.0 - sum(weights)] + else: + weights = np.resize(weights, n_classes) + weights[-1] = 1.0 - sum(weights[:-1]) + else: + weights = [1.0 / n_classes] * n_classes + + n_useless = n_features - n_informative - n_redundant - n_repeated + n_clusters = n_classes * n_clusters_per_class + + # Distribute samples among clusters by weight + n_samples_per_cluster = [ + int(n_samples * weights[k % n_classes] / n_clusters_per_class) + for k in range(n_clusters) + ] + + for i in range(n_samples - sum(n_samples_per_cluster)): + n_samples_per_cluster[i % n_clusters] += 1 + + # Initialize X and y + X = np.zeros((n_samples, n_features)) + y = np.zeros(n_samples, dtype=int) + + # Build the polytope whose vertices become cluster centroids + centroids = _generate_hypercube(n_clusters, n_informative, generator).astype( + float, copy=False + ) + centroids *= 2 * class_sep + centroids -= class_sep + if not hypercube: + centroids *= generator.uniform(size=(n_clusters, 1)) + centroids *= generator.uniform(size=(1, n_informative)) + + # Initially draw informative features from the standard normal + X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative)) + + # Create each cluster; a variant of make_blobs + stop = 0 + for k, centroid in enumerate(centroids): + start, stop = stop, stop + n_samples_per_cluster[k] + y[start:stop] = k % n_classes # assign labels + X_k = X[start:stop, :n_informative] # slice a view of the cluster + + A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1 + X_k[...] = np.dot(X_k, A) # introduce random covariance + + X_k += centroid # shift the cluster to a vertex + + # Create redundant features + if n_redundant > 0: + B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1 + X[:, n_informative : n_informative + n_redundant] = np.dot( + X[:, :n_informative], B + ) + + # Repeat some features + if n_repeated > 0: + n = n_informative + n_redundant + indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp) + X[:, n : n + n_repeated] = X[:, indices] + + # Fill useless features + if n_useless > 0: + X[:, -n_useless:] = generator.standard_normal(size=(n_samples, n_useless)) + + # Randomly replace labels + if flip_y >= 0.0: + flip_mask = generator.uniform(size=n_samples) < flip_y + y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) + + # Randomly shift and scale + if shift is None: + shift = (2 * generator.uniform(size=n_features) - 1) * class_sep + X += shift + + if scale is None: + scale = 1 + 100 * generator.uniform(size=n_features) + X *= scale + + if shuffle: + # Randomly permute samples + X, y = util_shuffle(X, y, random_state=generator) + + # Randomly permute features + indices = np.arange(n_features) + generator.shuffle(indices) + X[:, :] = X[:, indices] + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_classes": [Interval(Integral, 1, None, closed="left")], + "n_labels": [Interval(Integral, 0, None, closed="left")], + "length": [Interval(Integral, 1, None, closed="left")], + "allow_unlabeled": ["boolean"], + "sparse": ["boolean"], + "return_indicator": [StrOptions({"dense", "sparse"}), "boolean"], + "return_distributions": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_multilabel_classification( + n_samples=100, + n_features=20, + *, + 
n_classes=5, + n_labels=2, + length=50, + allow_unlabeled=True, + sparse=False, + return_indicator="dense", + return_distributions=False, + random_state=None, +): + """Generate a random multilabel classification problem. + + For each sample, the generative process is: + - pick the number of labels: n ~ Poisson(n_labels) + - n times, choose a class c: c ~ Multinomial(theta) + - pick the document length: k ~ Poisson(length) + - k times, choose a word: w ~ Multinomial(theta_c) + + In the above process, rejection sampling is used to make sure that + n is never zero or more than `n_classes`, and that the document length + is never zero. Likewise, we reject classes which have already been chosen. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=20 + The total number of features. + + n_classes : int, default=5 + The number of classes of the classification problem. + + n_labels : int, default=2 + The average number of labels per instance. More precisely, the number + of labels per sample is drawn from a Poisson distribution with + ``n_labels`` as its expected value, but samples are bounded (using + rejection sampling) by ``n_classes``, and must be nonzero if + ``allow_unlabeled`` is False. + + length : int, default=50 + The sum of the features (number of words if documents) is drawn from + a Poisson distribution with this expected value. + + allow_unlabeled : bool, default=True + If ``True``, some instances might not belong to any class. + + sparse : bool, default=False + If ``True``, return a sparse feature matrix. + + .. versionadded:: 0.17 + parameter to allow *sparse* output. + + return_indicator : {'dense', 'sparse'} or False, default='dense' + If ``'dense'`` return ``Y`` in the dense binary indicator format. If + ``'sparse'`` return ``Y`` in the sparse binary indicator format. + ``False`` returns a list of lists of labels. + + return_distributions : bool, default=False + If ``True``, return the prior class probability and conditional + probabilities of features given classes, from which the data was + drawn. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + The label sets. Sparse matrix should be of CSR format. + + p_c : ndarray of shape (n_classes,) + The probability of each class being drawn. Only returned if + ``return_distributions=True``. + + p_w_c : ndarray of shape (n_features, n_classes) + The probability of each feature being drawn given each class. + Only returned if ``return_distributions=True``. 
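+
+    Notes
+    -----
+    Each row of ``X`` stores word counts, so ``X[i].sum()`` equals the number
+    of words drawn for sample ``i`` (roughly ``length`` on average).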
+ + Examples + -------- + >>> from sklearn.datasets import make_multilabel_classification + >>> X, y = make_multilabel_classification(n_labels=3, random_state=42) + >>> X.shape + (100, 20) + >>> y.shape + (100, 5) + >>> list(y[:3]) + [array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])] + """ + + generator = check_random_state(random_state) + p_c = generator.uniform(size=n_classes) + p_c /= p_c.sum() + cumulative_p_c = np.cumsum(p_c) + p_w_c = generator.uniform(size=(n_features, n_classes)) + p_w_c /= np.sum(p_w_c, axis=0) + + def sample_example(): + _, n_classes = p_w_c.shape + + # pick a nonzero number of labels per document by rejection sampling + y_size = n_classes + 1 + while (not allow_unlabeled and y_size == 0) or y_size > n_classes: + y_size = generator.poisson(n_labels) + + # pick n classes + y = set() + while len(y) != y_size: + # pick a class with probability P(c) + c = np.searchsorted(cumulative_p_c, generator.uniform(size=y_size - len(y))) + y.update(c) + y = list(y) + + # pick a non-zero document length by rejection sampling + n_words = 0 + while n_words == 0: + n_words = generator.poisson(length) + + # generate a document of length n_words + if len(y) == 0: + # if sample does not belong to any class, generate noise word + words = generator.randint(n_features, size=n_words) + return words, y + + # sample words with replacement from selected classes + cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum() + cumulative_p_w_sample /= cumulative_p_w_sample[-1] + words = np.searchsorted(cumulative_p_w_sample, generator.uniform(size=n_words)) + return words, y + + X_indices = array.array("i") + X_indptr = array.array("i", [0]) + Y = [] + for i in range(n_samples): + words, y = sample_example() + X_indices.extend(words) + X_indptr.append(len(X_indices)) + Y.append(y) + X_data = np.ones(len(X_indices), dtype=np.float64) + X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features)) + X.sum_duplicates() + if not sparse: + X = X.toarray() + + # return_indicator can be True due to backward compatibility + if return_indicator in (True, "sparse", "dense"): + lb = MultiLabelBinarizer(sparse_output=(return_indicator == "sparse")) + Y = lb.fit([range(n_classes)]).transform(Y) + if return_distributions: + return X, Y, p_c, p_w_c + return X, Y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_hastie_10_2(n_samples=12000, *, random_state=None): + """Generate data for binary classification used in Hastie et al. 2009, Example 10.2. + + The ten features are standard independent Gaussian and + the target ``y`` is defined by:: + + y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=12000 + The number of samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 10) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + See Also + -------- + make_gaussian_quantiles : A generalization of this dataset approach. + + References + ---------- + .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical + Learning Ed. 2", Springer, 2009. 
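+
+    Examples
+    --------
+    A minimal call, shown only to illustrate the default output shapes:
+
+    >>> from sklearn.datasets import make_hastie_10_2
+    >>> X, y = make_hastie_10_2(n_samples=1000, random_state=0)
+    >>> X.shape
+    (1000, 10)
+    >>> y.shape
+    (1000,)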
+ """ + rs = check_random_state(random_state) + + shape = (n_samples, 10) + X = rs.normal(size=shape).reshape(shape) + y = ((X**2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False) + y[y == 0.0] = -1.0 + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_informative": [Interval(Integral, 0, None, closed="left")], + "n_targets": [Interval(Integral, 1, None, closed="left")], + "bias": [Interval(Real, None, None, closed="neither")], + "effective_rank": [Interval(Integral, 1, None, closed="left"), None], + "tail_strength": [Interval(Real, 0, 1, closed="both")], + "noise": [Interval(Real, 0, None, closed="left")], + "shuffle": ["boolean"], + "coef": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_regression( + n_samples=100, + n_features=100, + *, + n_informative=10, + n_targets=1, + bias=0.0, + effective_rank=None, + tail_strength=0.5, + noise=0.0, + shuffle=True, + coef=False, + random_state=None, +): + """Generate a random regression problem. + + The input set can either be well conditioned (by default) or have a low + rank-fat tail singular profile. See :func:`make_low_rank_matrix` for + more details. + + The output is generated by applying a (potentially biased) random linear + regression model with `n_informative` nonzero regressors to the previously + generated input and some gaussian centered noise with some adjustable + scale. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=100 + The number of features. + + n_informative : int, default=10 + The number of informative features, i.e., the number of features used + to build the linear model used to generate the output. + + n_targets : int, default=1 + The number of regression targets, i.e., the dimension of the y output + vector associated with a sample. By default, the output is a scalar. + + bias : float, default=0.0 + The bias term in the underlying linear model. + + effective_rank : int, default=None + If not None: + The approximate number of singular vectors required to explain most + of the input data by linear combinations. Using this kind of + singular spectrum in the input allows the generator to reproduce + the correlations often observed in practice. + If None: + The input set is well conditioned, centered and gaussian with + unit variance. + + tail_strength : float, default=0.5 + The relative importance of the fat noisy tail of the singular values + profile if `effective_rank` is not None. When a float, it should be + between 0 and 1. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + shuffle : bool, default=True + Shuffle the samples and the features. + + coef : bool, default=False + If True, the coefficients of the underlying linear model are returned. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The input samples. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + The output values. + + coef : ndarray of shape (n_features,) or (n_features, n_targets) + The coefficient of the underlying linear model. 
It is returned only if + coef is True. + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42) + >>> X + array([[ 0.4967..., -0.1382... ], + [ 0.6476..., 1.523...], + [-0.2341..., -0.2341...], + [-0.4694..., 0.5425...], + [ 1.579..., 0.7674...]]) + >>> y + array([ 6.737..., 37.79..., -10.27..., 0.4017..., 42.22...]) + """ + n_informative = min(n_features, n_informative) + generator = check_random_state(random_state) + + if effective_rank is None: + # Randomly generate a well conditioned input set + X = generator.standard_normal(size=(n_samples, n_features)) + + else: + # Randomly generate a low rank, fat tail input set + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features, + effective_rank=effective_rank, + tail_strength=tail_strength, + random_state=generator, + ) + + # Generate a ground truth model with only n_informative features being non + # zeros (the other features are not correlated to y and should be ignored + # by a sparsifying regularizers such as L1 or elastic net) + ground_truth = np.zeros((n_features, n_targets)) + ground_truth[:n_informative, :] = 100 * generator.uniform( + size=(n_informative, n_targets) + ) + + y = np.dot(X, ground_truth) + bias + + # Add noise + if noise > 0.0: + y += generator.normal(scale=noise, size=y.shape) + + # Randomly permute samples and features + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + indices = np.arange(n_features) + generator.shuffle(indices) + X[:, :] = X[:, indices] + ground_truth = ground_truth[indices] + + y = np.squeeze(y) + + if coef: + return X, y, np.squeeze(ground_truth) + + else: + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 0, None, closed="left"), tuple], + "shuffle": ["boolean"], + "noise": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + "factor": [Interval(Real, 0, 1, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def make_circles( + n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8 +): + """Make a large circle containing a smaller circle in 2d. + + A simple toy dataset to visualize clustering and classification + algorithms. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or tuple of shape (2,), dtype=int, default=100 + If int, it is the total number of points generated. + For odd numbers, the inner circle will have one point more than the + outer circle. + If two-element tuple, number of points in outer circle and inner + circle. + + .. versionchanged:: 0.23 + Added two-element tuple. + + shuffle : bool, default=True + Whether to shuffle the samples. + + noise : float, default=None + Standard deviation of Gaussian noise added to the data. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling and noise. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + factor : float, default=.8 + Scale factor between inner and outer circle in the range `[0, 1)`. + + Returns + ------- + X : ndarray of shape (n_samples, 2) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels (0 or 1) for class membership of each sample. 
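+
+    Notes
+    -----
+    Points are placed at evenly spaced angles on each circle: the outer circle
+    has radius 1 and the inner circle has radius ``factor``. Gaussian noise,
+    if requested, is added after the points are generated.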
+ + Examples + -------- + >>> from sklearn.datasets import make_circles + >>> X, y = make_circles(random_state=42) + >>> X.shape + (100, 2) + >>> y.shape + (100,) + >>> list(y[:5]) + [1, 1, 1, 0, 0] + """ + if isinstance(n_samples, numbers.Integral): + n_samples_out = n_samples // 2 + n_samples_in = n_samples - n_samples_out + else: # n_samples is a tuple + if len(n_samples) != 2: + raise ValueError("When a tuple, n_samples must have exactly two elements.") + n_samples_out, n_samples_in = n_samples + + generator = check_random_state(random_state) + # so as not to have the first point = last point, we set endpoint=False + linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False) + linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False) + outer_circ_x = np.cos(linspace_out) + outer_circ_y = np.sin(linspace_out) + inner_circ_x = np.cos(linspace_in) * factor + inner_circ_y = np.sin(linspace_in) * factor + + X = np.vstack( + [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)] + ).T + y = np.hstack( + [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)] + ) + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + if noise is not None: + X += generator.normal(scale=noise, size=X.shape) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left"), tuple], + "shuffle": ["boolean"], + "noise": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None): + """Make two interleaving half circles. + + A simple toy dataset to visualize clustering and classification + algorithms. Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or tuple of shape (2,), dtype=int, default=100 + If int, the total number of points generated. + If two-element tuple, number of points in each of two moons. + + .. versionchanged:: 0.23 + Added two-element tuple. + + shuffle : bool, default=True + Whether to shuffle the samples. + + noise : float, default=None + Standard deviation of Gaussian noise added to the data. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling and noise. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 2) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels (0 or 1) for class membership of each sample. + """ + + if isinstance(n_samples, numbers.Integral): + n_samples_out = n_samples // 2 + n_samples_in = n_samples - n_samples_out + else: + try: + n_samples_out, n_samples_in = n_samples + except ValueError as e: + raise ValueError( + "`n_samples` can be either an int or a two-element tuple." 
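+                # Illustrative example of the accepted tuple form:
+                # n_samples=(60, 40) places 60 points on the outer moon and
+                # 40 points on the inner moon.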
+ ) from e + + generator = check_random_state(random_state) + + outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out)) + outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out)) + inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in)) + inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5 + + X = np.vstack( + [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)] + ).T + y = np.hstack( + [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)] + ) + + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + if noise is not None: + X += generator.normal(scale=noise, size=X.shape) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left"), "array-like"], + "n_features": [Interval(Integral, 1, None, closed="left")], + "centers": [Interval(Integral, 1, None, closed="left"), "array-like", None], + "cluster_std": [Interval(Real, 0, None, closed="left"), "array-like"], + "center_box": [tuple], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "return_centers": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def make_blobs( + n_samples=100, + n_features=2, + *, + centers=None, + cluster_std=1.0, + center_box=(-10.0, 10.0), + shuffle=True, + random_state=None, + return_centers=False, +): + """Generate isotropic Gaussian blobs for clustering. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or array-like, default=100 + If int, it is the total number of points equally divided among + clusters. + If array-like, each element of the sequence indicates + the number of samples per cluster. + + .. versionchanged:: v0.20 + one can now pass an array-like to the ``n_samples`` parameter + + n_features : int, default=2 + The number of features for each sample. + + centers : int or array-like of shape (n_centers, n_features), default=None + The number of centers to generate, or the fixed center locations. + If n_samples is an int and centers is None, 3 centers are generated. + If n_samples is array-like, centers must be + either None or an array of length equal to the length of n_samples. + + cluster_std : float or array-like of float, default=1.0 + The standard deviation of the clusters. + + center_box : tuple of float (min, max), default=(-10.0, 10.0) + The bounding box for each cluster center when centers are + generated at random. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + return_centers : bool, default=False + If True, then return the centers of each cluster. + + .. versionadded:: 0.23 + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels for cluster membership of each sample. + + centers : ndarray of shape (n_centers, n_features) + The centers of each cluster. Only returned if + ``return_centers=True``. + + See Also + -------- + make_classification : A more intricate variant. + + Examples + -------- + >>> from sklearn.datasets import make_blobs + >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, + ... 
random_state=0) + >>> print(X.shape) + (10, 2) + >>> y + array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) + >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2, + ... random_state=0) + >>> print(X.shape) + (10, 2) + >>> y + array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0]) + """ + generator = check_random_state(random_state) + + if isinstance(n_samples, numbers.Integral): + # Set n_centers by looking at centers arg + if centers is None: + centers = 3 + + if isinstance(centers, numbers.Integral): + n_centers = centers + centers = generator.uniform( + center_box[0], center_box[1], size=(n_centers, n_features) + ) + + else: + centers = check_array(centers) + n_features = centers.shape[1] + n_centers = centers.shape[0] + + else: + # Set n_centers by looking at [n_samples] arg + n_centers = len(n_samples) + if centers is None: + centers = generator.uniform( + center_box[0], center_box[1], size=(n_centers, n_features) + ) + if not isinstance(centers, Iterable): + raise ValueError( + "Parameter `centers` must be array-like. Got {!r} instead".format( + centers + ) + ) + if len(centers) != n_centers: + raise ValueError( + "Length of `n_samples` not consistent with number of " + f"centers. Got n_samples = {n_samples} and centers = {centers}" + ) + centers = check_array(centers) + n_features = centers.shape[1] + + # stds: if cluster_std is given as list, it must be consistent + # with the n_centers + if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers: + raise ValueError( + "Length of `clusters_std` not consistent with " + "number of centers. Got centers = {} " + "and cluster_std = {}".format(centers, cluster_std) + ) + + if isinstance(cluster_std, numbers.Real): + cluster_std = np.full(len(centers), cluster_std) + + if isinstance(n_samples, Iterable): + n_samples_per_center = n_samples + else: + n_samples_per_center = [int(n_samples // n_centers)] * n_centers + + for i in range(n_samples % n_centers): + n_samples_per_center[i] += 1 + + cum_sum_n_samples = np.cumsum(n_samples_per_center) + X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64) + y = np.empty(shape=(sum(n_samples_per_center),), dtype=int) + + for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)): + start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0 + end_idx = cum_sum_n_samples[i] + X[start_idx:end_idx] = generator.normal( + loc=centers[i], scale=std, size=(n, n_features) + ) + y[start_idx:end_idx] = i + + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + if return_centers: + return X, y, centers + else: + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 5, None, closed="left")], + "noise": [Interval(Real, 0.0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None): + """Generate the "Friedman #1" regression problem. + + This dataset is described in Friedman [1] and Breiman [2]. + + Inputs `X` are independent features uniformly distributed on the interval + [0, 1]. The output `y` is created according to the formula:: + + y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ ++ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1). + + Out of the `n_features` features, only 5 are actually used to compute + `y`. The remaining features are independent of `y`. + + The number of features has to be >= 5. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=10 + The number of features. Should be at least 5. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset noise. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals + of Statistics 19 (1), pages 1-67, 1991. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, + pages 123-140, 1996. + + Examples + -------- + >>> from sklearn.datasets import make_friedman1 + >>> X, y = make_friedman1(random_state=42) + >>> X.shape + (100, 10) + >>> y.shape + (100,) + >>> list(y[:3]) + [16.8..., 5.8..., 9.4...] + """ + generator = check_random_state(random_state) + + X = generator.uniform(size=(n_samples, n_features)) + y = ( + 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + + 20 * (X[:, 2] - 0.5) ** 2 + + 10 * X[:, 3] + + 5 * X[:, 4] + + noise * generator.standard_normal(size=(n_samples)) + ) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_friedman2(n_samples=100, *, noise=0.0, random_state=None): + """Generate the "Friedman #2" regression problem. + + This dataset is described in Friedman [1] and Breiman [2]. + + Inputs `X` are 4 independent features uniformly distributed on the + intervals:: + + 0 <= X[:, 0] <= 100, + 40 * pi <= X[:, 1] <= 560 * pi, + 0 <= X[:, 2] <= 1, + 1 <= X[:, 3] <= 11. + + The output `y` is created according to the formula:: + + y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ + - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset noise. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 4) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals + of Statistics 19 (1), pages 1-67, 1991. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, + pages 123-140, 1996. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> X, y = make_friedman2(random_state=42) + >>> X.shape + (100, 4) + >>> y.shape + (100,) + >>> list(y[:3]) + [1229.4..., 27.0..., 65.6...] 
+ """ + generator = check_random_state(random_state) + + X = generator.uniform(size=(n_samples, 4)) + X[:, 0] *= 100 + X[:, 1] *= 520 * np.pi + X[:, 1] += 40 * np.pi + X[:, 3] *= 10 + X[:, 3] += 1 + + y = ( + X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2 + ) ** 0.5 + noise * generator.standard_normal(size=(n_samples)) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_friedman3(n_samples=100, *, noise=0.0, random_state=None): + """Generate the "Friedman #3" regression problem. + + This dataset is described in Friedman [1] and Breiman [2]. + + Inputs `X` are 4 independent features uniformly distributed on the + intervals:: + + 0 <= X[:, 0] <= 100, + 40 * pi <= X[:, 1] <= 560 * pi, + 0 <= X[:, 2] <= 1, + 1 <= X[:, 3] <= 11. + + The output `y` is created according to the formula:: + + y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \ +/ X[:, 0]) + noise * N(0, 1). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset noise. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 4) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals + of Statistics 19 (1), pages 1-67, 1991. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, + pages 123-140, 1996. + + Examples + -------- + >>> from sklearn.datasets import make_friedman3 + >>> X, y = make_friedman3(random_state=42) + >>> X.shape + (100, 4) + >>> y.shape + (100,) + >>> list(y[:3]) + [1.5..., 0.9..., 0.4...] + """ + generator = check_random_state(random_state) + + X = generator.uniform(size=(n_samples, 4)) + X[:, 0] *= 100 + X[:, 1] *= 520 * np.pi + X[:, 1] += 40 * np.pi + X[:, 3] *= 10 + X[:, 3] += 1 + + y = np.arctan( + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0] + ) + noise * generator.standard_normal(size=(n_samples)) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "effective_rank": [Interval(Integral, 1, None, closed="left")], + "tail_strength": [Interval(Real, 0, 1, closed="both")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_low_rank_matrix( + n_samples=100, + n_features=100, + *, + effective_rank=10, + tail_strength=0.5, + random_state=None, +): + """Generate a mostly low rank matrix with bell-shaped singular values. + + Most of the variance can be explained by a bell-shaped curve of width + effective_rank: the low rank part of the singular values profile is:: + + (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) + + The remaining singular values' tail is fat, decreasing as:: + + tail_strength * exp(-0.1 * i / effective_rank). 
+ + The low rank part of the profile can be considered the structured + signal part of the data while the tail can be considered the noisy + part of the data that cannot be summarized by a low number of linear + components (singular vectors). + + This kind of singular profiles is often seen in practice, for instance: + - gray level pictures of faces + - TF-IDF vectors of text documents crawled from the web + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=100 + The number of features. + + effective_rank : int, default=10 + The approximate number of singular vectors required to explain most of + the data by linear combinations. + + tail_strength : float, default=0.5 + The relative importance of the fat noisy tail of the singular values + profile. The value should be between 0 and 1. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The matrix. + """ + generator = check_random_state(random_state) + n = min(n_samples, n_features) + + # Random (ortho normal) vectors + u, _ = linalg.qr( + generator.standard_normal(size=(n_samples, n)), + mode="economic", + check_finite=False, + ) + v, _ = linalg.qr( + generator.standard_normal(size=(n_features, n)), + mode="economic", + check_finite=False, + ) + + # Index of the singular values + singular_ind = np.arange(n, dtype=np.float64) + + # Build the singular profile by assembling signal and noise components + low_rank = (1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2) + tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank) + s = np.identity(n) * (low_rank + tail) + + return np.dot(np.dot(u, s), v.T) + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "data_transposed": ["boolean", Hidden(StrOptions({"deprecated"}))], + }, + prefer_skip_nested_validation=True, +) +def make_sparse_coded_signal( + n_samples, + *, + n_components, + n_features, + n_nonzero_coefs, + random_state=None, + data_transposed="deprecated", +): + """Generate a signal as a sparse combination of dictionary elements. + + Returns a matrix `Y = DX`, such that `D` is of shape `(n_features, n_components)`, + `X` is of shape `(n_components, n_samples)` and each column of `X` has exactly + `n_nonzero_coefs` non-zero elements. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int + Number of samples to generate. + + n_components : int + Number of components in the dictionary. + + n_features : int + Number of features of the dataset to generate. + + n_nonzero_coefs : int + Number of active (non-zero) coefficients in each sample. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + data_transposed : bool, default=False + By default, Y, D and X are not transposed. + + .. versionadded:: 1.1 + + .. 
versionchanged:: 1.3 + Default value changed from True to False. + + .. deprecated:: 1.3 + `data_transposed` is deprecated and will be removed in 1.5. + + Returns + ------- + data : ndarray of shape (n_features, n_samples) or (n_samples, n_features) + The encoded signal (Y). The shape is `(n_samples, n_features)` if + `data_transposed` is False, otherwise it's `(n_features, n_samples)`. + + dictionary : ndarray of shape (n_features, n_components) or \ + (n_components, n_features) + The dictionary with normalized components (D). The shape is + `(n_components, n_features)` if `data_transposed` is False, otherwise it's + `(n_features, n_components)`. + + code : ndarray of shape (n_components, n_samples) or (n_samples, n_components) + The sparse code such that each column of this matrix has exactly + n_nonzero_coefs non-zero items (X). The shape is `(n_samples, n_components)` + if `data_transposed` is False, otherwise it's `(n_components, n_samples)`. + """ + generator = check_random_state(random_state) + + # generate dictionary + D = generator.standard_normal(size=(n_features, n_components)) + D /= np.sqrt(np.sum((D**2), axis=0)) + + # generate code + X = np.zeros((n_components, n_samples)) + for i in range(n_samples): + idx = np.arange(n_components) + generator.shuffle(idx) + idx = idx[:n_nonzero_coefs] + X[idx, i] = generator.standard_normal(size=n_nonzero_coefs) + + # encode signal + Y = np.dot(D, X) + + # TODO(1.5) remove data_transposed + # raise warning if data_transposed is not passed explicitly + if data_transposed != "deprecated": + warnings.warn( + "data_transposed was deprecated in version 1.3 and will be removed in 1.5.", + FutureWarning, + ) + else: + data_transposed = False + + # transpose if needed + if not data_transposed: + Y, D, X = Y.T, D.T, X.T + + return map(np.squeeze, (Y, D, X)) + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_sparse_uncorrelated(n_samples=100, n_features=10, *, random_state=None): + """Generate a random regression problem with sparse uncorrelated design. + + This dataset is described in Celeux et al [1]. as:: + + X ~ N(0, 1) + y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3] + + Only the first 4 features are informative. The remaining features are + useless. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=10 + The number of features. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert, + "Regularization in regression: comparing Bayesian and frequentist + methods in a poorly informative situation", 2009. 
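+
+    Examples
+    --------
+    A minimal call, shown only to illustrate the default output shapes:
+
+    >>> from sklearn.datasets import make_sparse_uncorrelated
+    >>> X, y = make_sparse_uncorrelated(random_state=0)
+    >>> X.shape
+    (100, 10)
+    >>> y.shape
+    (100,)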
+ """ + generator = check_random_state(random_state) + + X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) + y = generator.normal( + loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]), + scale=np.ones(n_samples), + ) + + return X, y + + +@validate_params( + { + "n_dim": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_spd_matrix(n_dim, *, random_state=None): + """Generate a random symmetric, positive-definite matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_dim : int + The matrix dimension. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_dim, n_dim) + The random symmetric, positive-definite matrix. + + See Also + -------- + make_sparse_spd_matrix: Generate a sparse symmetric definite positive matrix. + + Examples + -------- + >>> from sklearn.datasets import make_spd_matrix + >>> make_spd_matrix(n_dim=2, random_state=42) + array([[2.09..., 0.34...], + [0.34..., 0.21...]]) + """ + generator = check_random_state(random_state) + + A = generator.uniform(size=(n_dim, n_dim)) + U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False) + X = np.dot(np.dot(U, 1.0 + np.diag(generator.uniform(size=n_dim))), Vt) + + return X + + +@validate_params( + { + "n_dim": [Hidden(None), Interval(Integral, 1, None, closed="left")], + "alpha": [Interval(Real, 0, 1, closed="both")], + "norm_diag": ["boolean"], + "smallest_coef": [Interval(Real, 0, 1, closed="both")], + "largest_coef": [Interval(Real, 0, 1, closed="both")], + "sparse_format": [ + StrOptions({"bsr", "coo", "csc", "csr", "dia", "dok", "lil"}), + None, + ], + "random_state": ["random_state"], + "dim": [ + Interval(Integral, 1, None, closed="left"), + Hidden(StrOptions({"deprecated"})), + ], + }, + prefer_skip_nested_validation=True, +) +def make_sparse_spd_matrix( + n_dim=None, + *, + alpha=0.95, + norm_diag=False, + smallest_coef=0.1, + largest_coef=0.9, + sparse_format=None, + random_state=None, + dim="deprecated", +): + """Generate a sparse symmetric definite positive matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_dim : int, default=1 + The size of the random matrix to generate. + + .. versionchanged:: 1.4 + Renamed from ``dim`` to ``n_dim``. + + alpha : float, default=0.95 + The probability that a coefficient is zero (see notes). Larger values + enforce more sparsity. The value should be in the range 0 and 1. + + norm_diag : bool, default=False + Whether to normalize the output matrix to make the leading diagonal + elements all 1. + + smallest_coef : float, default=0.1 + The value of the smallest coefficient between 0 and 1. + + largest_coef : float, default=0.9 + The value of the largest coefficient between 0 and 1. + + sparse_format : str, default=None + String representing the output sparse format, such as 'csc', 'csr', etc. + If ``None``, return a dense numpy ndarray. + + .. versionadded:: 1.4 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + dim : int, default=1 + The size of the random matrix to generate. + + .. 
deprecated:: 1.4 + `dim` is deprecated and will be removed in 1.6. + + Returns + ------- + prec : ndarray or sparse matrix of shape (dim, dim) + The generated matrix. If ``sparse_format=None``, this would be an ndarray. + Otherwise, this will be a sparse matrix of the specified format. + + See Also + -------- + make_spd_matrix : Generate a random symmetric, positive-definite matrix. + + Notes + ----- + The sparsity is actually imposed on the cholesky factor of the matrix. + Thus alpha does not translate directly into the filling fraction of + the matrix itself. + + Examples + -------- + >>> from sklearn.datasets import make_sparse_spd_matrix + >>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42) + array([[1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 1., 0.], + [0., 0., 0., 1.]]) + """ + random_state = check_random_state(random_state) + + # TODO(1.6): remove in 1.6 + # Also make sure to change `n_dim` default back to 1 and deprecate None + if n_dim is not None and dim != "deprecated": + raise ValueError( + "`dim` and `n_dim` cannot be both specified. Please use `n_dim` only " + "as `dim` is deprecated in v1.4 and will be removed in v1.6." + ) + + if dim != "deprecated": + warnings.warn( + ( + "dim was deprecated in version 1.4 and will be removed in 1.6." + "Please use ``n_dim`` instead." + ), + FutureWarning, + ) + _n_dim = dim + elif n_dim is None: + _n_dim = 1 + else: + _n_dim = n_dim + + chol = -sp.eye(_n_dim) + aux = sp.random( + m=_n_dim, + n=_n_dim, + density=1 - alpha, + data_rvs=lambda x: random_state.uniform( + low=smallest_coef, high=largest_coef, size=x + ), + random_state=random_state, + ) + # We need to avoid "coo" format because it does not support slicing + aux = sp.tril(aux, k=-1, format="csc") + + # Permute the lines: we don't want to have asymmetries in the final + # SPD matrix + permutation = random_state.permutation(_n_dim) + aux = aux[permutation].T[permutation] + chol += aux + prec = chol.T @ chol + + if norm_diag: + # Form the diagonal vector into a row matrix + d = sp.diags(1.0 / np.sqrt(prec.diagonal())) + prec = d @ prec @ d + + if sparse_format is None: + return prec.toarray() + else: + return prec.asformat(sparse_format) + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + "hole": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None, hole=False): + """Generate a swiss roll dataset. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of sample points on the Swiss Roll. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + hole : bool, default=False + If True generates the swiss roll with hole dataset. + + Returns + ------- + X : ndarray of shape (n_samples, 3) + The points. + + t : ndarray of shape (n_samples,) + The univariate position of the sample according to the main dimension + of the points in the manifold. + + Notes + ----- + The algorithm is from Marsland [1]. + + References + ---------- + .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", 2nd edition, + Chapter 6, 2014. 
+ https://homepages.ecs.vuw.ac.nz/~marslast/Code/Ch6/lle.py + """ + generator = check_random_state(random_state) + + if not hole: + t = 1.5 * np.pi * (1 + 2 * generator.uniform(size=n_samples)) + y = 21 * generator.uniform(size=n_samples) + else: + corners = np.array( + [[np.pi * (1.5 + i), j * 7] for i in range(3) for j in range(3)] + ) + corners = np.delete(corners, 4, axis=0) + corner_index = generator.choice(8, n_samples) + parameters = generator.uniform(size=(2, n_samples)) * np.array([[np.pi], [7]]) + t, y = corners[corner_index].T + parameters + + x = t * np.cos(t) + z = t * np.sin(t) + + X = np.vstack((x, y, z)) + X += noise * generator.standard_normal(size=(3, n_samples)) + X = X.T + t = np.squeeze(t) + + return X, t + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_s_curve(n_samples=100, *, noise=0.0, random_state=None): + """Generate an S curve dataset. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of sample points on the S curve. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 3) + The points. + + t : ndarray of shape (n_samples,) + The univariate position of the sample according to the main dimension + of the points in the manifold. + """ + generator = check_random_state(random_state) + + t = 3 * np.pi * (generator.uniform(size=(1, n_samples)) - 0.5) + X = np.empty(shape=(n_samples, 3), dtype=np.float64) + X[:, 0] = np.sin(t) + X[:, 1] = 2.0 * generator.uniform(size=n_samples) + X[:, 2] = np.sign(t) * (np.cos(t) - 1) + X += noise * generator.standard_normal(size=(3, n_samples)).T + t = np.squeeze(t) + + return X, t + + +@validate_params( + { + "mean": ["array-like", None], + "cov": [Interval(Real, 0, None, closed="left")], + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_classes": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_gaussian_quantiles( + *, + mean=None, + cov=1.0, + n_samples=100, + n_features=2, + n_classes=3, + shuffle=True, + random_state=None, +): + r"""Generate isotropic Gaussian and label samples by quantile. + + This classification dataset is constructed by taking a multi-dimensional + standard normal distribution and defining classes separated by nested + concentric multi-dimensional spheres such that roughly equal numbers of + samples are in each class (quantiles of the :math:`\chi^2` distribution). + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + mean : array-like of shape (n_features,), default=None + The mean of the multi-dimensional normal distribution. + If None then use the origin (0, 0, ...). + + cov : float, default=1.0 + The covariance matrix will be this value times the unit matrix. This + dataset only produces symmetric normal distributions. 
+ + n_samples : int, default=100 + The total number of points equally divided among classes. + + n_features : int, default=2 + The number of features for each sample. + + n_classes : int, default=3 + The number of classes. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels for quantile membership of each sample. + + Notes + ----- + The dataset is from Zhu et al [1]. + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. + + Examples + -------- + >>> from sklearn.datasets import make_gaussian_quantiles + >>> X, y = make_gaussian_quantiles(random_state=42) + >>> X.shape + (100, 2) + >>> y.shape + (100,) + >>> list(y[:5]) + [2, 0, 1, 0, 2] + """ + if n_samples < n_classes: + raise ValueError("n_samples must be at least n_classes") + + generator = check_random_state(random_state) + + if mean is None: + mean = np.zeros(n_features) + else: + mean = np.array(mean) + + # Build multivariate normal distribution + X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,)) + + # Sort by distance from origin + idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1)) + X = X[idx, :] + + # Label by quantile + step = n_samples // n_classes + + y = np.hstack( + [ + np.repeat(np.arange(n_classes), step), + np.repeat(n_classes - 1, n_samples - step * n_classes), + ] + ) + + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + return X, y + + +def _shuffle(data, random_state=None): + generator = check_random_state(random_state) + n_rows, n_cols = data.shape + row_idx = generator.permutation(n_rows) + col_idx = generator.permutation(n_cols) + result = data[row_idx][:, col_idx] + return result, row_idx, col_idx + + +@validate_params( + { + "shape": [tuple], + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "minval": [Interval(Real, None, None, closed="neither")], + "maxval": [Interval(Real, None, None, closed="neither")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_biclusters( + shape, + n_clusters, + *, + noise=0.0, + minval=10, + maxval=100, + shuffle=True, + random_state=None, +): + """Generate a constant block diagonal structure array for biclustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + shape : tuple of shape (n_rows, n_cols) + The shape of the result. + + n_clusters : int + The number of biclusters. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + minval : float, default=10 + Minimum value of a bicluster. + + maxval : float, default=100 + Maximum value of a bicluster. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape `shape` + The generated array. + + rows : ndarray of shape (n_clusters, X.shape[0]) + The indicators for cluster membership of each row. 
+ + cols : ndarray of shape (n_clusters, X.shape[1]) + The indicators for cluster membership of each column. + + See Also + -------- + make_checkerboard: Generate an array with block checkerboard structure for + biclustering. + + References + ---------- + + .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and + words using bipartite spectral graph partitioning. In Proceedings + of the seventh ACM SIGKDD international conference on Knowledge + discovery and data mining (pp. 269-274). ACM. + """ + generator = check_random_state(random_state) + n_rows, n_cols = shape + consts = generator.uniform(minval, maxval, n_clusters) + + # row and column clusters of approximately equal sizes + row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters)) + col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters)) + + row_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes)] + ) + col_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes)] + ) + + result = np.zeros(shape, dtype=np.float64) + for i in range(n_clusters): + selector = np.outer(row_labels == i, col_labels == i) + result[selector] += consts[i] + + if noise > 0: + result += generator.normal(scale=noise, size=result.shape) + + if shuffle: + result, row_idx, col_idx = _shuffle(result, random_state) + row_labels = row_labels[row_idx] + col_labels = col_labels[col_idx] + + rows = np.vstack([row_labels == c for c in range(n_clusters)]) + cols = np.vstack([col_labels == c for c in range(n_clusters)]) + + return result, rows, cols + + +@validate_params( + { + "shape": [tuple], + "n_clusters": [Interval(Integral, 1, None, closed="left"), "array-like"], + "noise": [Interval(Real, 0, None, closed="left")], + "minval": [Interval(Real, None, None, closed="neither")], + "maxval": [Interval(Real, None, None, closed="neither")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_checkerboard( + shape, + n_clusters, + *, + noise=0.0, + minval=10, + maxval=100, + shuffle=True, + random_state=None, +): + """Generate an array with block checkerboard structure for biclustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + shape : tuple of shape (n_rows, n_cols) + The shape of the result. + + n_clusters : int or array-like or shape (n_row_clusters, n_column_clusters) + The number of row and column clusters. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + minval : float, default=10 + Minimum value of a bicluster. + + maxval : float, default=100 + Maximum value of a bicluster. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape `shape` + The generated array. + + rows : ndarray of shape (n_clusters, X.shape[0]) + The indicators for cluster membership of each row. + + cols : ndarray of shape (n_clusters, X.shape[1]) + The indicators for cluster membership of each column. + + See Also + -------- + make_biclusters : Generate an array with constant block diagonal structure + for biclustering. + + References + ---------- + .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003). 
+ Spectral biclustering of microarray data: coclustering genes + and conditions. Genome research, 13(4), 703-716. + """ + generator = check_random_state(random_state) + + if hasattr(n_clusters, "__len__"): + n_row_clusters, n_col_clusters = n_clusters + else: + n_row_clusters = n_col_clusters = n_clusters + + # row and column clusters of approximately equal sizes + n_rows, n_cols = shape + row_sizes = generator.multinomial( + n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters) + ) + col_sizes = generator.multinomial( + n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters) + ) + + row_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes)] + ) + col_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes)] + ) + + result = np.zeros(shape, dtype=np.float64) + for i in range(n_row_clusters): + for j in range(n_col_clusters): + selector = np.outer(row_labels == i, col_labels == j) + result[selector] += generator.uniform(minval, maxval) + + if noise > 0: + result += generator.normal(scale=noise, size=result.shape) + + if shuffle: + result, row_idx, col_idx = _shuffle(result, random_state) + row_labels = row_labels[row_idx] + col_labels = col_labels[col_idx] + + rows = np.vstack( + [ + row_labels == label + for label in range(n_row_clusters) + for _ in range(n_col_clusters) + ] + ) + cols = np.vstack( + [ + col_labels == label + for _ in range(n_row_clusters) + for label in range(n_col_clusters) + ] + ) + + return result, rows, cols diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..7979604afab0e8aa8b64b8a65daa32a9ec2438b3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py @@ -0,0 +1,273 @@ +""" +============================= +Species distribution dataset +============================= + +This dataset represents the geographic distribution of species. +The dataset is provided by Phillips et. al. (2006). + +The two species are: + + - `"Bradypus variegatus" + `_ , + the Brown-throated Sloth. + + - `"Microryzomys minutus" + `_ , + also known as the Forest Small Rice Rat, a rodent that lives in Peru, + Colombia, Ecuador, Peru, and Venezuela. + +References +---------- + +`"Maximum entropy modeling of species geographic distributions" +`_ S. J. Phillips, +R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. + +Notes +----- + +For an example of using this dataset, see +:ref:`examples/applications/plot_species_distribution_modeling.py +`. +""" + +# Authors: Peter Prettenhofer +# Jake Vanderplas +# +# License: BSD 3 clause + +import logging +from io import BytesIO +from os import PathLike, makedirs, remove +from os.path import exists + +import joblib +import numpy as np + +from ..utils import Bunch +from ..utils._param_validation import validate_params +from . 
import get_data_home +from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath + +# The original data can be found at: +# https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip +SAMPLES = RemoteFileMetadata( + filename="samples.zip", + url="https://ndownloader.figshare.com/files/5976075", + checksum="abb07ad284ac50d9e6d20f1c4211e0fd3c098f7f85955e89d321ee8efe37ac28", +) + +# The original data can be found at: +# https://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip +COVERAGES = RemoteFileMetadata( + filename="coverages.zip", + url="https://ndownloader.figshare.com/files/5976078", + checksum="4d862674d72e79d6cee77e63b98651ec7926043ba7d39dcb31329cf3f6073807", +) + +DATA_ARCHIVE_NAME = "species_coverage.pkz" + + +logger = logging.getLogger(__name__) + + +def _load_coverage(F, header_length=6, dtype=np.int16): + """Load a coverage file from an open file object. + + This will return a numpy array of the given dtype + """ + header = [F.readline() for _ in range(header_length)] + make_tuple = lambda t: (t.split()[0], float(t.split()[1])) + header = dict([make_tuple(line) for line in header]) + + M = np.loadtxt(F, dtype=dtype) + nodata = int(header[b"NODATA_value"]) + if nodata != -9999: + M[nodata] = -9999 + return M + + +def _load_csv(F): + """Load csv file. + + Parameters + ---------- + F : file object + CSV file open in byte mode. + + Returns + ------- + rec : np.ndarray + record array representing the data + """ + names = F.readline().decode("ascii").strip().split(",") + + rec = np.loadtxt(F, skiprows=0, delimiter=",", dtype="S22,f4,f4") + rec.dtype.names = names + return rec + + +def construct_grids(batch): + """Construct the map grid from the batch object + + Parameters + ---------- + batch : Batch object + The object returned by :func:`fetch_species_distributions` + + Returns + ------- + (xgrid, ygrid) : 1-D arrays + The grid corresponding to the values in batch.coverages + """ + # x,y coordinates for corner cells + xmin = batch.x_left_lower_corner + batch.grid_size + xmax = xmin + (batch.Nx * batch.grid_size) + ymin = batch.y_left_lower_corner + batch.grid_size + ymax = ymin + (batch.Ny * batch.grid_size) + + # x coordinates of the grid cells + xgrid = np.arange(xmin, xmax, batch.grid_size) + # y coordinates of the grid cells + ygrid = np.arange(ymin, ymax, batch.grid_size) + + return (xgrid, ygrid) + + +@validate_params( + {"data_home": [str, PathLike, None], "download_if_missing": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def fetch_species_distributions(*, data_home=None, download_if_missing=True): + """Loader for species distribution dataset from Phillips et. al. (2006). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + coverages : array, shape = [14, 1592, 1212] + These represent the 14 features measured + at each point of the map grid. + The latitude/longitude values for the grid are discussed below. + Missing data is represented by the value -9999. + train : record array, shape = (1624,) + The training points for the data. 
Each point has three fields: + + - train['species'] is the species name + - train['dd long'] is the longitude, in degrees + - train['dd lat'] is the latitude, in degrees + test : record array, shape = (620,) + The test points for the data. Same format as the training data. + Nx, Ny : integers + The number of longitudes (x) and latitudes (y) in the grid + x_left_lower_corner, y_left_lower_corner : floats + The (x,y) position of the lower-left corner, in degrees + grid_size : float + The spacing between points of the grid, in degrees + + Notes + ----- + + This dataset represents the geographic distribution of species. + The dataset is provided by Phillips et. al. (2006). + + The two species are: + + - `"Bradypus variegatus" + `_ , + the Brown-throated Sloth. + + - `"Microryzomys minutus" + `_ , + also known as the Forest Small Rice Rat, a rodent that lives in Peru, + Colombia, Ecuador, Peru, and Venezuela. + + - For an example of using this dataset with scikit-learn, see + :ref:`examples/applications/plot_species_distribution_modeling.py + `. + + References + ---------- + + * `"Maximum entropy modeling of species geographic distributions" + `_ + S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, + 190:231-259, 2006. + + Examples + -------- + >>> from sklearn.datasets import fetch_species_distributions + >>> species = fetch_species_distributions() + >>> species.train[:5] + array([(b'microryzomys_minutus', -64.7 , -17.85 ), + (b'microryzomys_minutus', -67.8333, -16.3333), + (b'microryzomys_minutus', -67.8833, -16.3 ), + (b'microryzomys_minutus', -67.8 , -16.2667), + (b'microryzomys_minutus', -67.9833, -15.9 )], + dtype=[('species', 'S22'), ('dd long', ' +# Lars Buitinck +# Olivier Grisel +# License: BSD 3 clause + +import os.path +from contextlib import closing +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from .. import __version__ +from ..utils import IS_PYPY, check_array +from ..utils._param_validation import HasMethods, Interval, StrOptions, validate_params + +if not IS_PYPY: + from ._svmlight_format_fast import ( + _dump_svmlight_file, + _load_svmlight_file, + ) +else: + + def _load_svmlight_file(*args, **kwargs): + raise NotImplementedError( + "load_svmlight_file is currently not " + "compatible with PyPy (see " + "https://github.com/scikit-learn/scikit-learn/issues/11543 " + "for the status updates)." + ) + + +@validate_params( + { + "f": [ + str, + Interval(Integral, 0, None, closed="left"), + os.PathLike, + HasMethods("read"), + ], + "n_features": [Interval(Integral, 1, None, closed="left"), None], + "dtype": "no_validation", # delegate validation to numpy + "multilabel": ["boolean"], + "zero_based": ["boolean", StrOptions({"auto"})], + "query_id": ["boolean"], + "offset": [Interval(Integral, 0, None, closed="left")], + "length": [Integral], + }, + prefer_skip_nested_validation=True, +) +def load_svmlight_file( + f, + *, + n_features=None, + dtype=np.float64, + multilabel=False, + zero_based="auto", + query_id=False, + offset=0, + length=-1, +): + """Load datasets in the svmlight / libsvm format into sparse CSR matrix. + + This format is a text-based format, with one sample per line. It does + not store zero valued features hence is suitable for sparse dataset. + + The first element of each line can be used to store a target variable + to predict. + + This format is used as the default format for both svmlight and the + libsvm command line programs. + + Parsing a text based source can be expensive. 
When repeatedly + working on the same dataset, it is recommended to wrap this + loader with joblib.Memory.cache to store a memmapped backup of the + CSR results of the first call and benefit from the near instantaneous + loading of memmapped structures for the subsequent calls. + + In case the file contains a pairwise preference constraint (known + as "qid" in the svmlight format) these are ignored unless the + query_id parameter is set to True. These pairwise preference + constraints can be used to constraint the combination of samples + when using pairwise loss functions (as is the case in some + learning to rank problems) so that only pairs with the same + query_id value are considered. + + This implementation is written in Cython and is reasonably fast. + However, a faster API-compatible loader is also available at: + + https://github.com/mblondel/svmlight-loader + + Parameters + ---------- + f : str, path-like, file-like or int + (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will + be uncompressed on the fly. If an integer is passed, it is assumed to + be a file descriptor. A file-like or file descriptor will not be closed + by this function. A file-like object must be opened in binary mode. + + .. versionchanged:: 1.2 + Path-like objects are now accepted. + + n_features : int, default=None + The number of features to use. If None, it will be inferred. This + argument is useful to load several files that are subsets of a + bigger sliced dataset: each subset might not have examples of + every feature, hence the inferred shape might vary from one + slice to another. + n_features is only required if ``offset`` or ``length`` are passed a + non-default value. + + dtype : numpy data type, default=np.float64 + Data type of dataset to be loaded. This will be the data type of the + output numpy arrays ``X`` and ``y``. + + multilabel : bool, default=False + Samples may have several labels each (see + https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). + + zero_based : bool or "auto", default="auto" + Whether column indices in f are zero-based (True) or one-based + (False). If column indices are one-based, they are transformed to + zero-based to match Python/NumPy conventions. + If set to "auto", a heuristic check is applied to determine this from + the file contents. Both kinds of files occur "in the wild", but they + are unfortunately not self-identifying. Using "auto" or True should + always be safe when no ``offset`` or ``length`` is passed. + If ``offset`` or ``length`` are passed, the "auto" mode falls back + to ``zero_based=True`` to avoid having the heuristic check yield + inconsistent results on different segments of the file. + + query_id : bool, default=False + If True, will return the query_id array for each file. + + offset : int, default=0 + Ignore the offset first bytes by seeking forward, then + discarding the following bytes up until the next new line + character. + + length : int, default=-1 + If strictly positive, stop reading any new line of data once the + position in the file has reached the (offset + length) bytes threshold. + + Returns + ------- + X : scipy.sparse matrix of shape (n_samples, n_features) + The data matrix. + + y : ndarray of shape (n_samples,), or a list of tuples of length n_samples + The target. It is a list of tuples when ``multilabel=True``, else a + ndarray. + + query_id : array of shape (n_samples,) + The query_id for each sample. Only returned when query_id is set to + True. 
+ + See Also + -------- + load_svmlight_files : Similar function for loading multiple files in this + format, enforcing the same number of features/columns on all of them. + + Examples + -------- + To use joblib.Memory to cache the svmlight file:: + + from joblib import Memory + from .datasets import load_svmlight_file + mem = Memory("./mycache") + + @mem.cache + def get_data(): + data = load_svmlight_file("mysvmlightfile") + return data[0], data[1] + + X, y = get_data() + """ + return tuple( + load_svmlight_files( + [f], + n_features=n_features, + dtype=dtype, + multilabel=multilabel, + zero_based=zero_based, + query_id=query_id, + offset=offset, + length=length, + ) + ) + + +def _gen_open(f): + if isinstance(f, int): # file descriptor + return open(f, "rb", closefd=False) + elif isinstance(f, os.PathLike): + f = os.fspath(f) + elif not isinstance(f, str): + raise TypeError("expected {str, int, path-like, file-like}, got %s" % type(f)) + + _, ext = os.path.splitext(f) + if ext == ".gz": + import gzip + + return gzip.open(f, "rb") + elif ext == ".bz2": + from bz2 import BZ2File + + return BZ2File(f, "rb") + else: + return open(f, "rb") + + +def _open_and_load(f, dtype, multilabel, zero_based, query_id, offset=0, length=-1): + if hasattr(f, "read"): + actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file( + f, dtype, multilabel, zero_based, query_id, offset, length + ) + else: + with closing(_gen_open(f)) as f: + actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file( + f, dtype, multilabel, zero_based, query_id, offset, length + ) + + # convert from array.array, give data the right dtype + if not multilabel: + labels = np.frombuffer(labels, np.float64) + data = np.frombuffer(data, actual_dtype) + indices = np.frombuffer(ind, np.longlong) + indptr = np.frombuffer(indptr, dtype=np.longlong) # never empty + query = np.frombuffer(query, np.int64) + + data = np.asarray(data, dtype=dtype) # no-op for float{32,64} + return data, indices, indptr, labels, query + + +@validate_params( + { + "files": [ + "array-like", + str, + os.PathLike, + HasMethods("read"), + Interval(Integral, 0, None, closed="left"), + ], + "n_features": [Interval(Integral, 1, None, closed="left"), None], + "dtype": "no_validation", # delegate validation to numpy + "multilabel": ["boolean"], + "zero_based": ["boolean", StrOptions({"auto"})], + "query_id": ["boolean"], + "offset": [Interval(Integral, 0, None, closed="left")], + "length": [Integral], + }, + prefer_skip_nested_validation=True, +) +def load_svmlight_files( + files, + *, + n_features=None, + dtype=np.float64, + multilabel=False, + zero_based="auto", + query_id=False, + offset=0, + length=-1, +): + """Load dataset from multiple files in SVMlight format. + + This function is equivalent to mapping load_svmlight_file over a list of + files, except that the results are concatenated into a single, flat list + and the samples vectors are constrained to all have the same number of + features. + + In case the file contains a pairwise preference constraint (known + as "qid" in the svmlight format) these are ignored unless the + query_id parameter is set to True. These pairwise preference + constraints can be used to constraint the combination of samples + when using pairwise loss functions (as is the case in some + learning to rank problems) so that only pairs with the same + query_id value are considered. + + Parameters + ---------- + files : array-like, dtype=str, path-like, file-like or int + (Paths of) files to load. 
If a path ends in ".gz" or ".bz2", it will + be uncompressed on the fly. If an integer is passed, it is assumed to + be a file descriptor. File-likes and file descriptors will not be + closed by this function. File-like objects must be opened in binary + mode. + + .. versionchanged:: 1.2 + Path-like objects are now accepted. + + n_features : int, default=None + The number of features to use. If None, it will be inferred from the + maximum column index occurring in any of the files. + + This can be set to a higher value than the actual number of features + in any of the input files, but setting it to a lower value will cause + an exception to be raised. + + dtype : numpy data type, default=np.float64 + Data type of dataset to be loaded. This will be the data type of the + output numpy arrays ``X`` and ``y``. + + multilabel : bool, default=False + Samples may have several labels each (see + https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). + + zero_based : bool or "auto", default="auto" + Whether column indices in f are zero-based (True) or one-based + (False). If column indices are one-based, they are transformed to + zero-based to match Python/NumPy conventions. + If set to "auto", a heuristic check is applied to determine this from + the file contents. Both kinds of files occur "in the wild", but they + are unfortunately not self-identifying. Using "auto" or True should + always be safe when no offset or length is passed. + If offset or length are passed, the "auto" mode falls back + to zero_based=True to avoid having the heuristic check yield + inconsistent results on different segments of the file. + + query_id : bool, default=False + If True, will return the query_id array for each file. + + offset : int, default=0 + Ignore the offset first bytes by seeking forward, then + discarding the following bytes up until the next new line + character. + + length : int, default=-1 + If strictly positive, stop reading any new line of data once the + position in the file has reached the (offset + length) bytes threshold. + + Returns + ------- + [X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays + Each (Xi, yi) pair is the result from load_svmlight_file(files[i]). + If query_id is set to True, this will return instead (Xi, yi, qi) + triplets. + + See Also + -------- + load_svmlight_file: Similar function for loading a single file in this + format. + + Notes + ----- + When fitting a model to a matrix X_train and evaluating it against a + matrix X_test, it is essential that X_train and X_test have the same + number of features (X_train.shape[1] == X_test.shape[1]). This may not + be the case if you load the files individually with load_svmlight_file. 
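+
+    Examples
+    --------
+    A minimal sketch; the two file names below are placeholders for existing
+    files in svmlight / libsvm format::
+
+        from sklearn.datasets import load_svmlight_files
+
+        X_train, y_train, X_test, y_test = load_svmlight_files(
+            ("svmlight_train.txt", "svmlight_test.txt")
+        )
+        # Both matrices share the same number of columns (features).
+        assert X_train.shape[1] == X_test.shape[1]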
+ """ + if (offset != 0 or length > 0) and zero_based == "auto": + # disable heuristic search to avoid getting inconsistent results on + # different segments of the file + zero_based = True + + if (offset != 0 or length > 0) and n_features is None: + raise ValueError("n_features is required when offset or length is specified.") + + r = [ + _open_and_load( + f, + dtype, + multilabel, + bool(zero_based), + bool(query_id), + offset=offset, + length=length, + ) + for f in files + ] + + if ( + zero_based is False + or zero_based == "auto" + and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r) + ): + for _, indices, _, _, _ in r: + indices -= 1 + + n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1 + + if n_features is None: + n_features = n_f + elif n_features < n_f: + raise ValueError( + "n_features was set to {}, but input file contains {} features".format( + n_features, n_f + ) + ) + + result = [] + for data, indices, indptr, y, query_values in r: + shape = (indptr.shape[0] - 1, n_features) + X = sp.csr_matrix((data, indices, indptr), shape) + X.sort_indices() + result += X, y + if query_id: + result.append(query_values) + + return result + + +def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id): + if comment: + f.write( + ( + "# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__ + ).encode() + ) + f.write( + ("# Column indices are %s-based\n" % ["zero", "one"][one_based]).encode() + ) + + f.write(b"#\n") + f.writelines(b"# %s\n" % line for line in comment.splitlines()) + X_is_sp = sp.issparse(X) + y_is_sp = sp.issparse(y) + if not multilabel and not y_is_sp: + y = y[:, np.newaxis] + _dump_svmlight_file( + X, + y, + f, + multilabel, + one_based, + query_id, + X_is_sp, + y_is_sp, + ) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "f": [str, HasMethods(["write"])], + "zero_based": ["boolean"], + "comment": [str, bytes, None], + "query_id": ["array-like", None], + "multilabel": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def dump_svmlight_file( + X, + y, + f, + *, + zero_based=True, + comment=None, + query_id=None, + multilabel=False, +): + """Dump the dataset in svmlight / libsvm file format. + + This format is a text-based format, with one sample per line. It does + not store zero valued features hence is suitable for sparse dataset. + + The first element of each line can be used to store a target variable + to predict. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels) + Target values. Class labels must be an + integer or float, or array-like objects of integer or float for + multilabel classifications. + + f : str or file-like in binary mode + If string, specifies the path that will contain the data. + If file-like, data will be written to f. f should be opened in binary + mode. + + zero_based : bool, default=True + Whether column indices should be written zero-based (True) or one-based + (False). + + comment : str or bytes, default=None + Comment to insert at the top of the file. This should be either a + Unicode string, which will be encoded as UTF-8, or an ASCII byte + string. + If a comment is given, then it will be preceded by one that identifies + the file as having been dumped by scikit-learn. 
Note that not all + tools grok comments in SVMlight files. + + query_id : array-like of shape (n_samples,), default=None + Array containing pairwise preference constraints (qid in svmlight + format). + + multilabel : bool, default=False + Samples may have several labels each (see + https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). + + .. versionadded:: 0.17 + parameter `multilabel` to support multilabel datasets. + + Examples + -------- + >>> from sklearn.datasets import dump_svmlight_file, make_classification + >>> X, y = make_classification(random_state=0) + >>> output_file = "my_dataset.svmlight" + >>> dump_svmlight_file(X, y, output_file) # doctest: +SKIP + """ + if comment is not None: + # Convert comment string to list of lines in UTF-8. + # If a byte string is passed, then check whether it's ASCII; + # if a user wants to get fancy, they'll have to decode themselves. + if isinstance(comment, bytes): + comment.decode("ascii") # just for the exception + else: + comment = comment.encode("utf-8") + if b"\0" in comment: + raise ValueError("comment string contains NUL byte") + + yval = check_array(y, accept_sparse="csr", ensure_2d=False) + if sp.issparse(yval): + if yval.shape[1] != 1 and not multilabel: + raise ValueError( + "expected y of shape (n_samples, 1), got %r" % (yval.shape,) + ) + else: + if yval.ndim != 1 and not multilabel: + raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,)) + + Xval = check_array(X, accept_sparse="csr") + if Xval.shape[0] != yval.shape[0]: + raise ValueError( + "X.shape[0] and y.shape[0] should be the same, got %r and %r instead." + % (Xval.shape[0], yval.shape[0]) + ) + + # We had some issues with CSR matrices with unsorted indices (e.g. #1501), + # so sort them here, but first make sure we don't modify the user's X. + # TODO We can do this cheaper; sorted_indices copies the whole matrix. + if yval is y and hasattr(yval, "sorted_indices"): + y = yval.sorted_indices() + else: + y = yval + if hasattr(y, "sort_indices"): + y.sort_indices() + + if Xval is X and hasattr(Xval, "sorted_indices"): + X = Xval.sorted_indices() + else: + X = Xval + if hasattr(X, "sort_indices"): + X.sort_indices() + + if query_id is None: + # NOTE: query_id is passed to Cython functions using a fused type on query_id. + # Yet as of Cython>=3.0, memory views can't be None otherwise the runtime + # would not known which concrete implementation to dispatch the Python call to. + # TODO: simplify interfaces and implementations in _svmlight_format_fast.pyx. + query_id = np.array([], dtype=np.int32) + else: + query_id = np.asarray(query_id) + if query_id.shape[0] != y.shape[0]: + raise ValueError( + "expected query_id of shape (n_samples,), got %r" % (query_id.shape,) + ) + + one_based = not zero_based + + if hasattr(f, "write"): + _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id) + else: + with open(f, "wb") as f: + _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py new file mode 100644 index 0000000000000000000000000000000000000000..22ac716871cc284adc3616a8b25e484ab03f0d7a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py @@ -0,0 +1,561 @@ +"""Caching loader for the 20 newsgroups text classification dataset. 
+ + +The description of the dataset is available on the official website at: + + http://people.csail.mit.edu/jrennie/20Newsgroups/ + +Quoting the introduction: + + The 20 Newsgroups data set is a collection of approximately 20,000 + newsgroup documents, partitioned (nearly) evenly across 20 different + newsgroups. To the best of my knowledge, it was originally collected + by Ken Lang, probably for his Newsweeder: Learning to filter netnews + paper, though he does not explicitly mention this collection. The 20 + newsgroups collection has become a popular data set for experiments + in text applications of machine learning techniques, such as text + classification and text clustering. + +This dataset loader will download the recommended "by date" variant of the +dataset and which features a point in time split between the train and +test sets. The compressed dataset size is around 14 Mb compressed. Once +uncompressed the train set is 52 MB and the test set is 34 MB. +""" +# Copyright (c) 2011 Olivier Grisel +# License: BSD 3 clause + +import codecs +import logging +import os +import pickle +import re +import shutil +import tarfile +from contextlib import suppress + +import joblib +import numpy as np +import scipy.sparse as sp + +from .. import preprocessing +from ..feature_extraction.text import CountVectorizer +from ..utils import Bunch, check_random_state +from ..utils._param_validation import StrOptions, validate_params +from . import get_data_home, load_files +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + _pkl_filepath, + load_descr, +) + +logger = logging.getLogger(__name__) + +# The original data can be found at: +# https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz +ARCHIVE = RemoteFileMetadata( + filename="20news-bydate.tar.gz", + url="https://ndownloader.figshare.com/files/5975967", + checksum="8f1b2514ca22a5ade8fbb9cfa5727df95fa587f4c87b786e15c759fa66d95610", +) + +CACHE_NAME = "20news-bydate.pkz" +TRAIN_FOLDER = "20news-bydate-train" +TEST_FOLDER = "20news-bydate-test" + + +def _download_20newsgroups(target_dir, cache_path): + """Download the 20 newsgroups data and stored it as a zipped pickle.""" + train_path = os.path.join(target_dir, TRAIN_FOLDER) + test_path = os.path.join(target_dir, TEST_FOLDER) + + os.makedirs(target_dir, exist_ok=True) + + logger.info("Downloading dataset from %s (14 MB)", ARCHIVE.url) + archive_path = _fetch_remote(ARCHIVE, dirname=target_dir) + + logger.debug("Decompressing %s", archive_path) + tarfile.open(archive_path, "r:gz").extractall(path=target_dir) + + with suppress(FileNotFoundError): + os.remove(archive_path) + + # Store a zipped pickle + cache = dict( + train=load_files(train_path, encoding="latin1"), + test=load_files(test_path, encoding="latin1"), + ) + compressed_content = codecs.encode(pickle.dumps(cache), "zlib_codec") + with open(cache_path, "wb") as f: + f.write(compressed_content) + + shutil.rmtree(target_dir) + return cache + + +def strip_newsgroup_header(text): + """ + Given text in "news" format, strip the headers, by removing everything + before the first blank line. + + Parameters + ---------- + text : str + The text from which to remove the signature block. 
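+
+    Examples
+    --------
+    A small sketch with a made-up message; everything up to and including
+    the first blank line is removed::
+
+        msg = "Subject: hello\\nFrom: someone\\n\\nWhat a nice day."
+        strip_newsgroup_header(msg)   # -> "What a nice day."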
+ """ + _before, _blankline, after = text.partition("\n\n") + return after + + +_QUOTE_RE = re.compile( + r"(writes in|writes:|wrote:|says:|said:" r"|^In article|^Quoted from|^\||^>)" +) + + +def strip_newsgroup_quoting(text): + """ + Given text in "news" format, strip lines beginning with the quote + characters > or |, plus lines that often introduce a quoted section + (for example, because they contain the string 'writes:'.) + + Parameters + ---------- + text : str + The text from which to remove the signature block. + """ + good_lines = [line for line in text.split("\n") if not _QUOTE_RE.search(line)] + return "\n".join(good_lines) + + +def strip_newsgroup_footer(text): + """ + Given text in "news" format, attempt to remove a signature block. + + As a rough heuristic, we assume that signatures are set apart by either + a blank line or a line made of hyphens, and that it is the last such line + in the file (disregarding blank lines at the end). + + Parameters + ---------- + text : str + The text from which to remove the signature block. + """ + lines = text.strip().split("\n") + for line_num in range(len(lines) - 1, -1, -1): + line = lines[line_num] + if line.strip().strip("-") == "": + break + + if line_num > 0: + return "\n".join(lines[:line_num]) + else: + return text + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + "subset": [StrOptions({"train", "test", "all"})], + "categories": ["array-like", None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "remove": [tuple], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_20newsgroups( + *, + data_home=None, + subset="train", + categories=None, + shuffle=True, + random_state=42, + remove=(), + download_if_missing=True, + return_X_y=False, +): + """Load the filenames and data from the 20 newsgroups dataset \ +(classification). + + Download it if necessary. + + ================= ========== + Classes 20 + Samples total 18846 + Dimensionality 1 + Features text + ================= ========== + + Read more in the :ref:`User Guide <20newsgroups_dataset>`. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify a download and cache folder for the datasets. If None, + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + subset : {'train', 'test', 'all'}, default='train' + Select the dataset to load: 'train' for the training set, 'test' + for the test set, 'all' for both, with shuffled ordering. + + categories : array-like, dtype=str, default=None + If None (default), load all the categories. + If not None, list of category names to load (other categories + ignored). + + shuffle : bool, default=True + Whether or not to shuffle the data: might be important for models that + make the assumption that the samples are independent and identically + distributed (i.i.d.), such as stochastic gradient descent. + + random_state : int, RandomState instance or None, default=42 + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + remove : tuple, default=() + May contain any subset of ('headers', 'footers', 'quotes'). Each of + these are kinds of text that will be detected and removed from the + newsgroup posts, preventing classifiers from overfitting on + metadata. 
+ + 'headers' removes newsgroup headers, 'footers' removes blocks at the + ends of posts that look like signatures, and 'quotes' removes lines + that appear to be quoting another post. + + 'headers' follows an exact standard; the other filters are not always + correct. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns `(data.data, data.target)` instead of a Bunch + object. + + .. versionadded:: 0.22 + + Returns + ------- + bunch : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : list of shape (n_samples,) + The data list to learn. + target: ndarray of shape (n_samples,) + The target labels. + filenames: list of shape (n_samples,) + The path to the location of the data. + DESCR: str + The full description of the dataset. + target_names: list of shape (n_classes,) + The names of target classes. + + (data, target) : tuple if `return_X_y=True` + A tuple of two ndarrays. The first contains a 2D array of shape + (n_samples, n_classes) with each row representing one sample and each + column representing the features. The second array of shape + (n_samples,) contains the target samples. + + .. versionadded:: 0.22 + """ + + data_home = get_data_home(data_home=data_home) + cache_path = _pkl_filepath(data_home, CACHE_NAME) + twenty_home = os.path.join(data_home, "20news_home") + cache = None + if os.path.exists(cache_path): + try: + with open(cache_path, "rb") as f: + compressed_content = f.read() + uncompressed_content = codecs.decode(compressed_content, "zlib_codec") + cache = pickle.loads(uncompressed_content) + except Exception as e: + print(80 * "_") + print("Cache loading failed") + print(80 * "_") + print(e) + + if cache is None: + if download_if_missing: + logger.info("Downloading 20news dataset. 
This may take a few minutes.") + cache = _download_20newsgroups( + target_dir=twenty_home, cache_path=cache_path + ) + else: + raise OSError("20Newsgroups dataset not found") + + if subset in ("train", "test"): + data = cache[subset] + elif subset == "all": + data_lst = list() + target = list() + filenames = list() + for subset in ("train", "test"): + data = cache[subset] + data_lst.extend(data.data) + target.extend(data.target) + filenames.extend(data.filenames) + + data.data = data_lst + data.target = np.array(target) + data.filenames = np.array(filenames) + + fdescr = load_descr("twenty_newsgroups.rst") + + data.DESCR = fdescr + + if "headers" in remove: + data.data = [strip_newsgroup_header(text) for text in data.data] + if "footers" in remove: + data.data = [strip_newsgroup_footer(text) for text in data.data] + if "quotes" in remove: + data.data = [strip_newsgroup_quoting(text) for text in data.data] + + if categories is not None: + labels = [(data.target_names.index(cat), cat) for cat in categories] + # Sort the categories to have the ordering of the labels + labels.sort() + labels, categories = zip(*labels) + mask = np.isin(data.target, labels) + data.filenames = data.filenames[mask] + data.target = data.target[mask] + # searchsorted to have continuous labels + data.target = np.searchsorted(labels, data.target) + data.target_names = list(categories) + # Use an object array to shuffle: avoids memory copy + data_lst = np.array(data.data, dtype=object) + data_lst = data_lst[mask] + data.data = data_lst.tolist() + + if shuffle: + random_state = check_random_state(random_state) + indices = np.arange(data.target.shape[0]) + random_state.shuffle(indices) + data.filenames = data.filenames[indices] + data.target = data.target[indices] + # Use an object array to shuffle: avoids memory copy + data_lst = np.array(data.data, dtype=object) + data_lst = data_lst[indices] + data.data = data_lst.tolist() + + if return_X_y: + return data.data, data.target + + return data + + +@validate_params( + { + "subset": [StrOptions({"train", "test", "all"})], + "remove": [tuple], + "data_home": [str, os.PathLike, None], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + "normalize": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_20newsgroups_vectorized( + *, + subset="train", + remove=(), + data_home=None, + download_if_missing=True, + return_X_y=False, + normalize=True, + as_frame=False, +): + """Load and vectorize the 20 newsgroups dataset (classification). + + Download it if necessary. + + This is a convenience function; the transformation is done using the + default settings for + :class:`~sklearn.feature_extraction.text.CountVectorizer`. For more + advanced usage (stopword filtering, n-gram extraction, etc.), combine + fetch_20newsgroups with a custom + :class:`~sklearn.feature_extraction.text.CountVectorizer`, + :class:`~sklearn.feature_extraction.text.HashingVectorizer`, + :class:`~sklearn.feature_extraction.text.TfidfTransformer` or + :class:`~sklearn.feature_extraction.text.TfidfVectorizer`. + + The resulting counts are normalized using + :func:`sklearn.preprocessing.normalize` unless normalize is set to False. + + ================= ========== + Classes 20 + Samples total 18846 + Dimensionality 130107 + Features real + ================= ========== + + Read more in the :ref:`User Guide <20newsgroups_dataset>`. 
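+
+    As a rough sketch of the more flexible route mentioned above, one can
+    combine the raw loader with a custom vectorizer (the parameter choices
+    below are only illustrative)::
+
+        from sklearn.datasets import fetch_20newsgroups
+        from sklearn.feature_extraction.text import TfidfVectorizer
+
+        bunch = fetch_20newsgroups(subset="train", remove=("headers",))
+        X = TfidfVectorizer(stop_words="english").fit_transform(bunch.data)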
+ + Parameters + ---------- + subset : {'train', 'test', 'all'}, default='train' + Select the dataset to load: 'train' for the training set, 'test' + for the test set, 'all' for both, with shuffled ordering. + + remove : tuple, default=() + May contain any subset of ('headers', 'footers', 'quotes'). Each of + these are kinds of text that will be detected and removed from the + newsgroup posts, preventing classifiers from overfitting on + metadata. + + 'headers' removes newsgroup headers, 'footers' removes blocks at the + ends of posts that look like signatures, and 'quotes' removes lines + that appear to be quoting another post. + + data_home : str or path-like, default=None + Specify an download and cache folder for the datasets. If None, + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(data.data, data.target)`` instead of a Bunch + object. + + .. versionadded:: 0.20 + + normalize : bool, default=True + If True, normalizes each document's feature vector to unit norm using + :func:`sklearn.preprocessing.normalize`. + + .. versionadded:: 0.22 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string, or categorical). The target is + a pandas DataFrame or Series depending on the number of + `target_columns`. + + .. versionadded:: 0.24 + + Returns + ------- + bunch : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data: {sparse matrix, dataframe} of shape (n_samples, n_features) + The input data matrix. If ``as_frame`` is `True`, ``data`` is + a pandas DataFrame with sparse columns. + target: {ndarray, series} of shape (n_samples,) + The target labels. If ``as_frame`` is `True`, ``target`` is a + pandas Series. + target_names: list of shape (n_classes,) + The names of target classes. + DESCR: str + The full description of the dataset. + frame: dataframe of shape (n_samples, n_features + 1) + Only present when `as_frame=True`. Pandas DataFrame with ``data`` + and ``target``. + + .. versionadded:: 0.24 + + (data, target) : tuple if ``return_X_y`` is True + `data` and `target` would be of the format defined in the `Bunch` + description above. + + .. versionadded:: 0.20 + """ + data_home = get_data_home(data_home=data_home) + filebase = "20newsgroup_vectorized" + if remove: + filebase += "remove-" + "-".join(remove) + target_file = _pkl_filepath(data_home, filebase + ".pkl") + + # we shuffle but use a fixed seed for the memoization + data_train = fetch_20newsgroups( + data_home=data_home, + subset="train", + categories=None, + shuffle=True, + random_state=12, + remove=remove, + download_if_missing=download_if_missing, + ) + + data_test = fetch_20newsgroups( + data_home=data_home, + subset="test", + categories=None, + shuffle=True, + random_state=12, + remove=remove, + download_if_missing=download_if_missing, + ) + + if os.path.exists(target_file): + try: + X_train, X_test, feature_names = joblib.load(target_file) + except ValueError as e: + raise ValueError( + f"The cached dataset located in {target_file} was fetched " + "with an older scikit-learn version and it is not compatible " + "with the scikit-learn version imported. You need to " + f"manually delete the file: {target_file}." 
+ ) from e + else: + vectorizer = CountVectorizer(dtype=np.int16) + X_train = vectorizer.fit_transform(data_train.data).tocsr() + X_test = vectorizer.transform(data_test.data).tocsr() + feature_names = vectorizer.get_feature_names_out() + + joblib.dump((X_train, X_test, feature_names), target_file, compress=9) + + # the data is stored as int16 for compactness + # but normalize needs floats + if normalize: + X_train = X_train.astype(np.float64) + X_test = X_test.astype(np.float64) + preprocessing.normalize(X_train, copy=False) + preprocessing.normalize(X_test, copy=False) + + target_names = data_train.target_names + + if subset == "train": + data = X_train + target = data_train.target + elif subset == "test": + data = X_test + target = data_test.target + elif subset == "all": + data = sp.vstack((X_train, X_test)).tocsr() + target = np.concatenate((data_train.target, data_test.target)) + + fdescr = load_descr("twenty_newsgroups.rst") + + frame = None + target_name = ["category_class"] + + if as_frame: + frame, data, target = _convert_data_dataframe( + "fetch_20newsgroups_vectorized", + data, + target, + feature_names, + target_names=target_name, + sparse_data=True, + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + feature_names=feature_names, + DESCR=fdescr, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/breast_cancer.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/breast_cancer.rst new file mode 100644 index 0000000000000000000000000000000000000000..ceabd33e14ddc9086fae77b66c4213aca0d37a83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/breast_cancer.rst @@ -0,0 +1,122 @@ +.. _breast_cancer_dataset: + +Breast cancer wisconsin (diagnostic) dataset +-------------------------------------------- + +**Data Set Characteristics:** + +:Number of Instances: 569 + +:Number of Attributes: 30 numeric, predictive attributes and the class + +:Attribute Information: + - radius (mean of distances from center to points on the perimeter) + - texture (standard deviation of gray-scale values) + - perimeter + - area + - smoothness (local variation in radius lengths) + - compactness (perimeter^2 / area - 1.0) + - concavity (severity of concave portions of the contour) + - concave points (number of concave portions of the contour) + - symmetry + - fractal dimension ("coastline approximation" - 1) + + The mean, standard error, and "worst" or largest (mean of the three + worst/largest values) of these features were computed for each image, + resulting in 30 features. For instance, field 0 is Mean Radius, field + 10 is Radius SE, field 20 is Worst Radius. 
+ + - class: + - WDBC-Malignant + - WDBC-Benign + +:Summary Statistics: + +===================================== ====== ====== + Min Max +===================================== ====== ====== +radius (mean): 6.981 28.11 +texture (mean): 9.71 39.28 +perimeter (mean): 43.79 188.5 +area (mean): 143.5 2501.0 +smoothness (mean): 0.053 0.163 +compactness (mean): 0.019 0.345 +concavity (mean): 0.0 0.427 +concave points (mean): 0.0 0.201 +symmetry (mean): 0.106 0.304 +fractal dimension (mean): 0.05 0.097 +radius (standard error): 0.112 2.873 +texture (standard error): 0.36 4.885 +perimeter (standard error): 0.757 21.98 +area (standard error): 6.802 542.2 +smoothness (standard error): 0.002 0.031 +compactness (standard error): 0.002 0.135 +concavity (standard error): 0.0 0.396 +concave points (standard error): 0.0 0.053 +symmetry (standard error): 0.008 0.079 +fractal dimension (standard error): 0.001 0.03 +radius (worst): 7.93 36.04 +texture (worst): 12.02 49.54 +perimeter (worst): 50.41 251.2 +area (worst): 185.2 4254.0 +smoothness (worst): 0.071 0.223 +compactness (worst): 0.027 1.058 +concavity (worst): 0.0 1.252 +concave points (worst): 0.0 0.291 +symmetry (worst): 0.156 0.664 +fractal dimension (worst): 0.055 0.208 +===================================== ====== ====== + +:Missing Attribute Values: None + +:Class Distribution: 212 - Malignant, 357 - Benign + +:Creator: Dr. William H. Wolberg, W. Nick Street, Olvi L. Mangasarian + +:Donor: Nick Street + +:Date: November, 1995 + +This is a copy of UCI ML Breast Cancer Wisconsin (Diagnostic) datasets. +https://goo.gl/U2Uwz2 + +Features are computed from a digitized image of a fine needle +aspirate (FNA) of a breast mass. They describe +characteristics of the cell nuclei present in the image. + +Separating plane described above was obtained using +Multisurface Method-Tree (MSM-T) [K. P. Bennett, "Decision Tree +Construction Via Linear Programming." Proceedings of the 4th +Midwest Artificial Intelligence and Cognitive Science Society, +pp. 97-101, 1992], a classification method which uses linear +programming to construct a decision tree. Relevant features +were selected using an exhaustive search in the space of 1-4 +features and 1-3 separating planes. + +The actual linear program used to obtain the separating plane +in the 3-dimensional space is that described in: +[K. P. Bennett and O. L. Mangasarian: "Robust Linear +Programming Discrimination of Two Linearly Inseparable Sets", +Optimization Methods and Software 1, 1992, 23-34]. + +This database is also available through the UW CS ftp server: + +ftp ftp.cs.wisc.edu +cd math-prog/cpo-dataset/machine-learn/WDBC/ + +|details-start| +**References** +|details-split| + +- W.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction + for breast tumor diagnosis. IS&T/SPIE 1993 International Symposium on + Electronic Imaging: Science and Technology, volume 1905, pages 861-870, + San Jose, CA, 1993. +- O.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and + prognosis via linear programming. Operations Research, 43(4), pages 570-577, + July-August 1995. +- W.H. Wolberg, W.N. Street, and O.L. Mangasarian. Machine learning techniques + to diagnose breast cancer from fine-needle aspirates. Cancer Letters 77 (1994) + 163-171. 
+ +|details-end| diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/california_housing.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/california_housing.rst new file mode 100644 index 0000000000000000000000000000000000000000..33ff111fef5414cc2611577c5973beec89353774 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/california_housing.rst @@ -0,0 +1,46 @@ +.. _california_housing_dataset: + +California Housing dataset +-------------------------- + +**Data Set Characteristics:** + +:Number of Instances: 20640 + +:Number of Attributes: 8 numeric, predictive attributes and the target + +:Attribute Information: + - MedInc median income in block group + - HouseAge median house age in block group + - AveRooms average number of rooms per household + - AveBedrms average number of bedrooms per household + - Population block group population + - AveOccup average number of household members + - Latitude block group latitude + - Longitude block group longitude + +:Missing Attribute Values: None + +This dataset was obtained from the StatLib repository. +https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html + +The target variable is the median house value for California districts, +expressed in hundreds of thousands of dollars ($100,000). + +This dataset was derived from the 1990 U.S. census, using one row per census +block group. A block group is the smallest geographical unit for which the U.S. +Census Bureau publishes sample data (a block group typically has a population +of 600 to 3,000 people). + +A household is a group of people residing within a home. Since the average +number of rooms and bedrooms in this dataset are provided per household, these +columns may take surprisingly large values for block groups with few households +and many empty houses, such as vacation resorts. + +It can be downloaded/loaded using the +:func:`sklearn.datasets.fetch_california_housing` function. + +.. topic:: References + + - Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions, + Statistics and Probability Letters, 33 (1997) 291-297 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/covtype.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/covtype.rst new file mode 100644 index 0000000000000000000000000000000000000000..f4b752ade17a7f9325ae1e29297fa8b45097075d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/covtype.rst @@ -0,0 +1,30 @@ +.. _covtype_dataset: + +Forest covertypes +----------------- + +The samples in this dataset correspond to 30×30m patches of forest in the US, +collected for the task of predicting each patch's cover type, +i.e. the dominant species of tree. +There are seven covertypes, making this a multiclass classification problem. +Each sample has 54 features, described on the +`dataset's homepage `__. +Some of the features are boolean indicators, +while others are discrete or continuous measurements. + +**Data Set Characteristics:** + +================= ============ +Classes 7 +Samples total 581012 +Dimensionality 54 +Features int +================= ============ + +:func:`sklearn.datasets.fetch_covtype` will load the covertype dataset; +it returns a dictionary-like 'Bunch' object +with the feature matrix in the ``data`` member +and the target values in ``target``. 
If optional argument 'as_frame' is +set to 'True', it will return ``data`` and ``target`` as pandas +data frame, and there will be an additional member ``frame`` as well. +The dataset will be downloaded from the web if necessary. diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/iris.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/iris.rst new file mode 100644 index 0000000000000000000000000000000000000000..771c92faa98997d530e46354904bc39cb25ba530 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/iris.rst @@ -0,0 +1,67 @@ +.. _iris_dataset: + +Iris plants dataset +-------------------- + +**Data Set Characteristics:** + +:Number of Instances: 150 (50 in each of three classes) +:Number of Attributes: 4 numeric, predictive attributes and the class +:Attribute Information: + - sepal length in cm + - sepal width in cm + - petal length in cm + - petal width in cm + - class: + - Iris-Setosa + - Iris-Versicolour + - Iris-Virginica + +:Summary Statistics: + +============== ==== ==== ======= ===== ==================== + Min Max Mean SD Class Correlation +============== ==== ==== ======= ===== ==================== +sepal length: 4.3 7.9 5.84 0.83 0.7826 +sepal width: 2.0 4.4 3.05 0.43 -0.4194 +petal length: 1.0 6.9 3.76 1.76 0.9490 (high!) +petal width: 0.1 2.5 1.20 0.76 0.9565 (high!) +============== ==== ==== ======= ===== ==================== + +:Missing Attribute Values: None +:Class Distribution: 33.3% for each of 3 classes. +:Creator: R.A. Fisher +:Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov) +:Date: July, 1988 + +The famous Iris database, first used by Sir R.A. Fisher. The dataset is taken +from Fisher's paper. Note that it's the same as in R, but not as in the UCI +Machine Learning Repository, which has two wrong data points. + +This is perhaps the best known database to be found in the +pattern recognition literature. Fisher's paper is a classic in the field and +is referenced frequently to this day. (See Duda & Hart, for example.) The +data set contains 3 classes of 50 instances each, where each class refers to a +type of iris plant. One class is linearly separable from the other 2; the +latter are NOT linearly separable from each other. + +|details-start| +**References** +|details-split| + +- Fisher, R.A. "The use of multiple measurements in taxonomic problems" + Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions to + Mathematical Statistics" (John Wiley, NY, 1950). +- Duda, R.O., & Hart, P.E. (1973) Pattern Classification and Scene Analysis. + (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218. +- Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System + Structure and Classification Rule for Recognition in Partially Exposed + Environments". IEEE Transactions on Pattern Analysis and Machine + Intelligence, Vol. PAMI-2, No. 1, 67-71. +- Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE Transactions + on Information Theory, May 1972, 431-433. +- See also: 1988 MLC Proceedings, 54-64. Cheeseman et al"s AUTOCLASS II + conceptual clustering system finds 3 classes in the data. +- Many, many more ... 
+ +|details-end| diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/kddcup99.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/kddcup99.rst new file mode 100644 index 0000000000000000000000000000000000000000..fe8a0c8f4168c4d537c5687f3964017a43eb7a42 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/kddcup99.rst @@ -0,0 +1,94 @@ +.. _kddcup99_dataset: + +Kddcup 99 dataset +----------------- + +The KDD Cup '99 dataset was created by processing the tcpdump portions +of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset, +created by MIT Lincoln Lab [2]_. The artificial data (described on the `dataset's +homepage `_) was +generated using a closed network and hand-injected attacks to produce a +large number of different types of attack with normal activity in the +background. As the initial goal was to produce a large training set for +supervised learning algorithms, there is a large proportion (80.1%) of +abnormal data which is unrealistic in real world, and inappropriate for +unsupervised anomaly detection which aims at detecting 'abnormal' data, i.e.: + +* qualitatively different from normal data +* in large minority among the observations. + +We thus transform the KDD Data set into two different data sets: SA and SF. + +* SA is obtained by simply selecting all the normal data, and a small + proportion of abnormal data to gives an anomaly proportion of 1%. + +* SF is obtained as in [3]_ + by simply picking up the data whose attribute logged_in is positive, thus + focusing on the intrusion attack, which gives a proportion of 0.3% of + attack. + +* http and smtp are two subsets of SF corresponding with third feature + equal to 'http' (resp. to 'smtp'). + +General KDD structure: + +================ ========================================== +Samples total 4898431 +Dimensionality 41 +Features discrete (int) or continuous (float) +Targets str, 'normal.' or name of the anomaly type +================ ========================================== + +SA structure: + +================ ========================================== +Samples total 976158 +Dimensionality 41 +Features discrete (int) or continuous (float) +Targets str, 'normal.' or name of the anomaly type +================ ========================================== + +SF structure: + +================ ========================================== +Samples total 699691 +Dimensionality 4 +Features discrete (int) or continuous (float) +Targets str, 'normal.' or name of the anomaly type +================ ========================================== + +http structure: + +================ ========================================== +Samples total 619052 +Dimensionality 3 +Features discrete (int) or continuous (float) +Targets str, 'normal.' or name of the anomaly type +================ ========================================== + +smtp structure: + +================ ========================================== +Samples total 95373 +Dimensionality 3 +Features discrete (int) or continuous (float) +Targets str, 'normal.' or name of the anomaly type +================ ========================================== + +:func:`sklearn.datasets.fetch_kddcup99` will load the kddcup99 dataset; it +returns a dictionary-like object with the feature matrix in the ``data`` member +and the target values in ``target``. The "as_frame" optional argument converts +``data`` into a pandas DataFrame and ``target`` into a pandas Series. 
The +dataset will be downloaded from the web if necessary. + +.. topic:: References + + .. [2] Analysis and Results of the 1999 DARPA Off-Line Intrusion + Detection Evaluation, Richard Lippmann, Joshua W. Haines, + David J. Fried, Jonathan Korba, Kumar Das. + + .. [3] K. Yamanishi, J.-I. Takeuchi, G. Williams, and P. Milne. Online + unsupervised outlier detection using finite mixtures with + discounting learning algorithms. In Proceedings of the sixth + ACM SIGKDD international conference on Knowledge discovery + and data mining, pages 320-324. ACM Press, 2000. diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/lfw.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/lfw.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7d80558be3738dce2076d08e4f704400a2486b2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/lfw.rst @@ -0,0 +1,128 @@ +.. _labeled_faces_in_the_wild_dataset: + +The Labeled Faces in the Wild face recognition dataset +------------------------------------------------------ + +This dataset is a collection of JPEG pictures of famous people collected +over the internet, all details are available on the official website: + +http://vis-www.cs.umass.edu/lfw/ + +Each picture is centered on a single face. The typical task is called +Face Verification: given a pair of two pictures, a binary classifier +must predict whether the two images are from the same person. + +An alternative task, Face Recognition or Face Identification is: +given the picture of the face of an unknown person, identify the name +of the person by referring to a gallery of previously seen pictures of +identified persons. + +Both Face Verification and Face Recognition are tasks that are typically +performed on the output of a model trained to perform Face Detection. The +most popular model for Face Detection is called Viola-Jones and is +implemented in the OpenCV library. The LFW faces were extracted by this +face detector from various online websites. + +**Data Set Characteristics:** + +================= ======================= +Classes 5749 +Samples total 13233 +Dimensionality 5828 +Features real, between 0 and 255 +================= ======================= + +|details-start| +**Usage** +|details-split| + +``scikit-learn`` provides two loaders that will automatically download, +cache, parse the metadata files, decode the jpeg and convert the +interesting slices into memmapped numpy arrays. This dataset size is more +than 200 MB. The first load typically takes more than a couple of minutes +to fully decode the relevant part of the JPEG files into numpy arrays. If +the dataset has been loaded once, the following times the loading times +less than 200ms by using a memmapped version memoized on the disk in the +``~/scikit_learn_data/lfw_home/`` folder using ``joblib``. + +The first loader is used for the Face Identification task: a multi-class +classification task (hence supervised learning):: + + >>> from sklearn.datasets import fetch_lfw_people + >>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) + + >>> for name in lfw_people.target_names: + ... print(name) + ... 
+  Ariel Sharon
+  Colin Powell
+  Donald Rumsfeld
+  George W Bush
+  Gerhard Schroeder
+  Hugo Chavez
+  Tony Blair
+
+The default slice is a rectangular shape around the face, removing
+most of the background::
+
+  >>> lfw_people.data.dtype
+  dtype('float32')
+
+  >>> lfw_people.data.shape
+  (1288, 1850)
+
+  >>> lfw_people.images.shape
+  (1288, 50, 37)
+
+Each of the ``1288`` faces is assigned to a single person id in the ``target``
+array::
+
+  >>> lfw_people.target.shape
+  (1288,)
+
+  >>> list(lfw_people.target[:10])
+  [5, 6, 3, 1, 0, 1, 3, 4, 3, 0]
+
+The second loader is typically used for the face verification task: each sample
+is a pair of two pictures belonging (or not) to the same person::
+
+  >>> from sklearn.datasets import fetch_lfw_pairs
+  >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')
+
+  >>> list(lfw_pairs_train.target_names)
+  ['Different persons', 'Same person']
+
+  >>> lfw_pairs_train.pairs.shape
+  (2200, 2, 62, 47)
+
+  >>> lfw_pairs_train.data.shape
+  (2200, 5828)
+
+  >>> lfw_pairs_train.target.shape
+  (2200,)
+
+For both the :func:`sklearn.datasets.fetch_lfw_people` and
+:func:`sklearn.datasets.fetch_lfw_pairs` functions it is
+possible to get an additional dimension with the RGB color channels by
+passing ``color=True``; in that case the ``pairs`` shape above becomes
+``(2200, 2, 62, 47, 3)``.
+
+The :func:`sklearn.datasets.fetch_lfw_pairs` dataset is subdivided into
+3 subsets: the development ``train`` set, the development ``test`` set and
+an evaluation ``10_folds`` set meant to compute performance metrics using a
+10-fold cross-validation scheme.
+
+|details-end|
+
+.. topic:: References:
+
+  * `Labeled Faces in the Wild: A Database for Studying Face Recognition
+    in Unconstrained Environments.
+    `_
+    Gary B. Huang, Manu Ramesh, Tamara Berg, and Erik Learned-Miller.
+    University of Massachusetts, Amherst, Technical Report 07-49, October, 2007.
+
+
+.. topic:: Examples:
+
+   * :ref:`sphx_glr_auto_examples_applications_plot_face_recognition.py`
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/rcv1.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/rcv1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7cf3730a175545cf39f8d314cb74c0ca573c35b6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/rcv1.rst
@@ -0,0 +1,72 @@
+.. _rcv1_dataset:
+
+RCV1 dataset
+------------
+
+Reuters Corpus Volume I (RCV1) is an archive of over 800,000 manually
+categorized newswire stories made available by Reuters, Ltd. for research
+purposes. The dataset is extensively described in [1]_.
+
+**Data Set Characteristics:**
+
+============== =====================
+Classes        103
+Samples total  804414
+Dimensionality 47236
+Features       real, between 0 and 1
+============== =====================
+
+:func:`sklearn.datasets.fetch_rcv1` will load the following
+version: RCV1-v2, vectors, full sets, topics multilabels::
+
+  >>> from sklearn.datasets import fetch_rcv1
+  >>> rcv1 = fetch_rcv1()
+
+It returns a dictionary-like object, with the following attributes:
+
+``data``:
+The feature matrix is a scipy CSR sparse matrix, with 804414 samples and
+47236 features. Non-zero values contain cosine-normalized, log TF-IDF vectors.
+A nearly chronological split is proposed in [1]_: the first 23149 samples are
+the training set. The last 781265 samples are the testing set. This follows
+the official LYRL2004 chronological split. The array has 0.16% of non-zero
+values::
+
+  >>> rcv1.data.shape
+  (804414, 47236)
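+
+As an illustrative sketch (not part of the original description), the LYRL2004
+split mentioned above can also be obtained directly through the ``subset``
+parameter of :func:`sklearn.datasets.fetch_rcv1`::
+
+  >>> rcv1_train = fetch_rcv1(subset='train')  # doctest: +SKIP
+  >>> rcv1_train.data.shape                    # doctest: +SKIP
+  (23149, 47236)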
+
+``target``:
+The target values are stored in a scipy CSR sparse matrix, with 804414 samples
+and 103 categories. Each sample has a value of 1 in its categories, and 0 in
+others. The array has 3.15% of non-zero values::
+
+  >>> rcv1.target.shape
+  (804414, 103)
+
+``sample_id``:
+Each sample can be identified by its ID, ranging (with gaps) from 2286
+to 810596::
+
+  >>> rcv1.sample_id[:3]
+  array([2286, 2287, 2288], dtype=uint32)
+
+``target_names``:
+The target values are the topics of each sample. Each sample belongs to at
+least one topic, and to up to 17 topics. There are 103 topics, each
+represented by a string. Their corpus frequencies span five orders of
+magnitude, from 5 occurrences for 'GMIL', to 381327 for 'CCAT'::
+
+  >>> rcv1.target_names[:3].tolist()  # doctest: +SKIP
+  ['E11', 'ECAT', 'M11']
+
+The dataset will be downloaded from the `rcv1 homepage`_ if necessary.
+The compressed size is about 656 MB.
+
+.. _rcv1 homepage: http://jmlr.csail.mit.edu/papers/volume5/lewis04a/
+
+
+.. topic:: References
+
+    .. [1] Lewis, D. D., Yang, Y., Rose, T. G., & Li, F. (2004).
+           RCV1: A new benchmark collection for text categorization research.
+           The Journal of Machine Learning Research, 5, 361-397.
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/twenty_newsgroups.rst b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/twenty_newsgroups.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d1a049869dd7f4e5d8b9eed78b9ff784a2c704ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/descr/twenty_newsgroups.rst
@@ -0,0 +1,264 @@
+.. _20newsgroups_dataset:
+
+The 20 newsgroups text dataset
+------------------------------
+
+The 20 newsgroups dataset comprises around 18000 newsgroup posts on
+20 topics, split into two subsets: one for training (or development)
+and the other one for testing (or for performance evaluation). The split
+between the train and test set is based upon messages posted before
+and after a specific date.
+
+This module contains two loaders. The first one,
+:func:`sklearn.datasets.fetch_20newsgroups`,
+returns a list of the raw texts that can be fed to text feature
+extractors such as :class:`~sklearn.feature_extraction.text.CountVectorizer`
+with custom parameters so as to extract feature vectors.
+The second one, :func:`sklearn.datasets.fetch_20newsgroups_vectorized`,
+returns ready-to-use features, i.e., it is not necessary to use a feature
+extractor.
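+
+As an illustrative sketch (this snippet is not part of the original text), the
+difference between the two loaders can be seen from the kind of ``data`` they
+return::
+
+  >>> from sklearn.datasets import fetch_20newsgroups, fetch_20newsgroups_vectorized
+  >>> raw = fetch_20newsgroups(subset='train')                    # doctest: +SKIP
+  >>> type(raw.data[0])                                           # doctest: +SKIP
+  <class 'str'>
+  >>> vectorized = fetch_20newsgroups_vectorized(subset='train')  # doctest: +SKIP
+  >>> vectorized.data.shape                                       # doctest: +SKIP
+  (11314, 130107)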
+
+**Data Set Characteristics:**
+
+================= ==========
+Classes           20
+Samples total     18846
+Dimensionality    1
+Features          text
+================= ==========
+
+|details-start|
+**Usage**
+|details-split|
+
+The :func:`sklearn.datasets.fetch_20newsgroups` function is a data
+fetching / caching function that downloads the data archive from
+the original `20 newsgroups website`_, extracts the archive contents
+in the ``~/scikit_learn_data/20news_home`` folder and calls
+:func:`sklearn.datasets.load_files` on either the training or
+testing set folder, or both of them::
+
+  >>> from sklearn.datasets import fetch_20newsgroups
+  >>> newsgroups_train = fetch_20newsgroups(subset='train')
+
+  >>> from pprint import pprint
+  >>> pprint(list(newsgroups_train.target_names))
+  ['alt.atheism',
+   'comp.graphics',
+   'comp.os.ms-windows.misc',
+   'comp.sys.ibm.pc.hardware',
+   'comp.sys.mac.hardware',
+   'comp.windows.x',
+   'misc.forsale',
+   'rec.autos',
+   'rec.motorcycles',
+   'rec.sport.baseball',
+   'rec.sport.hockey',
+   'sci.crypt',
+   'sci.electronics',
+   'sci.med',
+   'sci.space',
+   'soc.religion.christian',
+   'talk.politics.guns',
+   'talk.politics.mideast',
+   'talk.politics.misc',
+   'talk.religion.misc']
+
+The real data lies in the ``filenames`` and ``target`` attributes. The target
+attribute is the integer index of the category::
+
+  >>> newsgroups_train.filenames.shape
+  (11314,)
+  >>> newsgroups_train.target.shape
+  (11314,)
+  >>> newsgroups_train.target[:10]
+  array([ 7,  4,  4,  1, 14, 16, 13,  3,  2,  4])
+
+It is possible to load only a sub-selection of the categories by passing the
+list of the categories to load to the
+:func:`sklearn.datasets.fetch_20newsgroups` function::
+
+  >>> cats = ['alt.atheism', 'sci.space']
+  >>> newsgroups_train = fetch_20newsgroups(subset='train', categories=cats)
+
+  >>> list(newsgroups_train.target_names)
+  ['alt.atheism', 'sci.space']
+  >>> newsgroups_train.filenames.shape
+  (1073,)
+  >>> newsgroups_train.target.shape
+  (1073,)
+  >>> newsgroups_train.target[:10]
+  array([0, 1, 1, 1, 0, 1, 1, 0, 0, 0])
+
+|details-end|
+
+|details-start|
+**Converting text to vectors**
+|details-split|
+
+In order to feed predictive or clustering models with the text data,
+one first needs to turn the text into vectors of numerical values suitable
+for statistical analysis. This can be achieved with the utilities of the
+``sklearn.feature_extraction.text`` module, as demonstrated in the following
+example that extracts `TF-IDF`_ vectors of unigram tokens
+from a subset of 20news::
+
+  >>> from sklearn.feature_extraction.text import TfidfVectorizer
+  >>> categories = ['alt.atheism', 'talk.religion.misc',
+  ...               'comp.graphics', 'sci.space']
+  >>> newsgroups_train = fetch_20newsgroups(subset='train',
+  ...                                       categories=categories)
+  >>> vectorizer = TfidfVectorizer()
+  >>> vectors = vectorizer.fit_transform(newsgroups_train.data)
+  >>> vectors.shape
+  (2034, 34118)
+
+The extracted TF-IDF vectors are very sparse, with an average of 159 non-zero
+components per sample in a more than 30000-dimensional space
+(less than .5% non-zero features)::
+
+  >>> vectors.nnz / float(vectors.shape[0])
+  159.01327...
+
+:func:`sklearn.datasets.fetch_20newsgroups_vectorized` is a function which
+returns ready-to-use token count features instead of file names.
+
+.. _`20 newsgroups website`: http://people.csail.mit.edu/jrennie/20Newsgroups/
+..
_`TF-IDF`: https://en.wikipedia.org/wiki/Tf-idf + +|details-end| + +|details-start| +**Filtering text for more realistic training** +|details-split| + +It is easy for a classifier to overfit on particular things that appear in the +20 Newsgroups data, such as newsgroup headers. Many classifiers achieve very +high F-scores, but their results would not generalize to other documents that +aren't from this window of time. + +For example, let's look at the results of a multinomial Naive Bayes classifier, +which is fast to train and achieves a decent F-score:: + + >>> from sklearn.naive_bayes import MultinomialNB + >>> from sklearn import metrics + >>> newsgroups_test = fetch_20newsgroups(subset='test', + ... categories=categories) + >>> vectors_test = vectorizer.transform(newsgroups_test.data) + >>> clf = MultinomialNB(alpha=.01) + >>> clf.fit(vectors, newsgroups_train.target) + MultinomialNB(alpha=0.01, class_prior=None, fit_prior=True) + + >>> pred = clf.predict(vectors_test) + >>> metrics.f1_score(newsgroups_test.target, pred, average='macro') + 0.88213... + +(The example :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` shuffles +the training and test data, instead of segmenting by time, and in that case +multinomial Naive Bayes gets a much higher F-score of 0.88. Are you suspicious +yet of what's going on inside this classifier?) + +Let's take a look at what the most informative features are: + + >>> import numpy as np + >>> def show_top10(classifier, vectorizer, categories): + ... feature_names = vectorizer.get_feature_names_out() + ... for i, category in enumerate(categories): + ... top10 = np.argsort(classifier.coef_[i])[-10:] + ... print("%s: %s" % (category, " ".join(feature_names[top10]))) + ... + >>> show_top10(clf, vectorizer, newsgroups_train.target_names) + alt.atheism: edu it and in you that is of to the + comp.graphics: edu in graphics it is for and of to the + sci.space: edu it that is in and space to of the + talk.religion.misc: not it you in is that and to of the + + +You can now see many things that these features have overfit to: + +- Almost every group is distinguished by whether headers such as + ``NNTP-Posting-Host:`` and ``Distribution:`` appear more or less often. +- Another significant feature involves whether the sender is affiliated with + a university, as indicated either by their headers or their signature. +- The word "article" is a significant feature, based on how often people quote + previous posts like this: "In article [article ID], [name] <[e-mail address]> + wrote:" +- Other features match the names and e-mail addresses of particular people who + were posting at the time. + +With such an abundance of clues that distinguish newsgroups, the classifiers +barely have to identify topics from text at all, and they all perform at the +same high level. + +For this reason, the functions that load 20 Newsgroups data provide a +parameter called **remove**, telling it what kinds of information to strip out +of each file. **remove** should be a tuple containing any subset of +``('headers', 'footers', 'quotes')``, telling it to remove headers, signature +blocks, and quotation blocks respectively. + + >>> newsgroups_test = fetch_20newsgroups(subset='test', + ... remove=('headers', 'footers', 'quotes'), + ... categories=categories) + >>> vectors_test = vectorizer.transform(newsgroups_test.data) + >>> pred = clf.predict(vectors_test) + >>> metrics.f1_score(pred, newsgroups_test.target, average='macro') + 0.77310... 
+
+This classifier lost a lot of its F-score, just because we removed
+metadata that has little to do with topic classification.
+It loses even more if we also strip this metadata from the training data:
+
+  >>> newsgroups_train = fetch_20newsgroups(subset='train',
+  ...                                       remove=('headers', 'footers', 'quotes'),
+  ...                                       categories=categories)
+  >>> vectors = vectorizer.fit_transform(newsgroups_train.data)
+  >>> clf = MultinomialNB(alpha=.01)
+  >>> clf.fit(vectors, newsgroups_train.target)
+  MultinomialNB(alpha=0.01, class_prior=None, fit_prior=True)
+
+  >>> vectors_test = vectorizer.transform(newsgroups_test.data)
+  >>> pred = clf.predict(vectors_test)
+  >>> metrics.f1_score(newsgroups_test.target, pred, average='macro')
+  0.76995...
+
+Some other classifiers cope better with this harder version of the task. Try the
+:ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_text_feature_extraction.py`
+example with and without the `remove` option to compare the results.
+
+|details-end|
+
+.. topic:: Data Considerations
+
+  The Cleveland Indians is a major league baseball team based in Cleveland,
+  Ohio, USA. In December 2020, it was reported that "After several months of
+  discussion sparked by the death of George Floyd and a national reckoning over
+  race and colonialism, the Cleveland Indians have decided to change their
+  name." Team owner Paul Dolan "did make it clear that the team will not make
+  its informal nickname -- the Tribe -- its new team name." "It's not going to
+  be a half-step away from the Indians," Dolan said. "We will not have a Native
+  American-themed name."
+
+  https://www.mlb.com/news/cleveland-indians-team-name-change
+
+.. topic:: Recommendation
+
+  - When evaluating text classifiers on the 20 Newsgroups data, you
+    should strip newsgroup-related metadata. In scikit-learn, you can do this
+    by setting ``remove=('headers', 'footers', 'quotes')``. The F-score will be
+    lower because the task is more realistic.
+  - This text dataset contains data which may be inappropriate for certain NLP
+    applications. An example is listed in the "Data Considerations" section
+    above. The challenge with using current text datasets in NLP for tasks such
+    as sentence completion, clustering, and other applications is that text
+    that is culturally biased and inflammatory will propagate biases. This
+    should be taken into consideration when using the dataset and reviewing the
+    output, and any such bias should be documented.
+
+..
topic:: Examples + + * :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_text_feature_extraction.py` + + * :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` + + * :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py` + + * :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py` diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68022c966dfec710eedfef467da4f638d96dc062 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..852f14fe8ab42e94bc80b464c3b9fbd9f697be6f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/metadata_routing_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ff26ea3e59c85b07269f5efa62f76906355657 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/random_seed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1740c79ce3c14a48664664ac10dc2696277e74e8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34756bc919edce493b513080ca837db9089c646f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_build.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0330d9abced58bd5f5f37662175be49606dcbd95 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..958cc74a0802b6f5f1f0eabad4fa4cbda0274ca2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bec629d04eb3c075086287eb8f4657b151122b8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36f0be629f084ae9eb7fb4cf2a4381ee4f73a787 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6846854e7e039299434d1e90402dd0eef2fb9af3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3886b564ade1554c60b13a34103780c0d2d29179 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b17af9b36917d7247cb0885f1f828d3885e38d03 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstrings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf4a1f9da4d6624a9fa75375dc0c8f9ca642b864 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fc49481e30cac19eeac8d353f36f1a587706cef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_init.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb310a3411b8fce9cba823bdfccc30e28fd68030 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_isotonic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37b95c283b3e63cd56f108f4f86636e00df33abc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c925f7fca15f1a05919619d05874e14b6df8fe54 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bd69360e04d9ecfdceea720c687dd9e8cb403f8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eed28110f2d593958d554f18c4d05bd20a50e67 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d0ac9b5182a7f7fe9af1a95e97a0cb2e8831eed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators_metadata_routing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12ca65a0bf1a6538b59e799b62c44cc23c52eaef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_min_dependencies_readme.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b968631c89a24c07db767c0b379459b2c2ec3ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b1976302352c97bdb3490b7714c5cd66b243ac0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multioutput.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9de10c863d1c950287e604d0689517913db27dc4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b36edfb2e460d6bb1a9f89b1ac1dc79b3dafdc73 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c574ccace5a4c802c5c53af7b2fa1bf104985338 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a95ba7c6a55d2e434568f1fc86554ba4023d7000 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..3bbc236e703df6f2ae037da7ee0ea2a93f289383 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/tests/test_base.py @@ -0,0 +1,921 @@ +# Author: Gael Varoquaux +# License: BSD 3 clause + +import pickle +import re +import warnings + +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose + +import sklearn +from sklearn import config_context, datasets +from sklearn.base import ( + BaseEstimator, + OutlierMixin, + TransformerMixin, + clone, + is_classifier, +) +from sklearn.decomposition import PCA +from sklearn.exceptions import InconsistentVersionWarning +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._mocking import MockDataFrame +from sklearn.utils._set_output import _get_output_config +from sklearn.utils._testing import ( + 
_convert_container, + assert_array_equal, + assert_no_warnings, + ignore_warnings, +) + + +############################################################################# +# A few test classes +class MyEstimator(BaseEstimator): + def __init__(self, l1=0, empty=None): + self.l1 = l1 + self.empty = empty + + +class K(BaseEstimator): + def __init__(self, c=None, d=None): + self.c = c + self.d = d + + +class T(BaseEstimator): + def __init__(self, a=None, b=None): + self.a = a + self.b = b + + +class NaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": True} + + +class NoNaNTag(BaseEstimator): + def _more_tags(self): + return {"allow_nan": False} + + +class OverrideTag(NaNTag): + def _more_tags(self): + return {"allow_nan": False} + + +class DiamondOverwriteTag(NaNTag, NoNaNTag): + def _more_tags(self): + return dict() + + +class InheritDiamondOverwriteTag(DiamondOverwriteTag): + pass + + +class ModifyInitParams(BaseEstimator): + """Deprecated behavior. + Equal parameters but with a type cast. + Doesn't fulfill a is a + """ + + def __init__(self, a=np.array([0])): + self.a = a.copy() + + +class Buggy(BaseEstimator): + "A buggy estimator that does not set its parameters right." + + def __init__(self, a=None): + self.a = 1 + + +class NoEstimator: + def __init__(self): + pass + + def fit(self, X=None, y=None): + return self + + def predict(self, X=None): + return None + + +class VargEstimator(BaseEstimator): + """scikit-learn estimators shouldn't have vargs.""" + + def __init__(self, *vargs): + pass + + +############################################################################# +# The tests + + +def test_clone(): + # Tests that clone creates a correct deep copy. + # We create an estimator, make a copy of its original state + # (which, in this case, is the current state of the estimator), + # and check that the obtained copy is a correct deep copy. + + from sklearn.feature_selection import SelectFpr, f_classif + + selector = SelectFpr(f_classif, alpha=0.1) + new_selector = clone(selector) + assert selector is not new_selector + assert selector.get_params() == new_selector.get_params() + + selector = SelectFpr(f_classif, alpha=np.zeros((10, 2))) + new_selector = clone(selector) + assert selector is not new_selector + + +def test_clone_2(): + # Tests that clone doesn't copy everything. + # We first create an estimator, give it an own attribute, and + # make a copy of its original state. Then we check that the copy doesn't + # have the specific attribute we manually added to the initial estimator. + + from sklearn.feature_selection import SelectFpr, f_classif + + selector = SelectFpr(f_classif, alpha=0.1) + selector.own_attribute = "test" + new_selector = clone(selector) + assert not hasattr(new_selector, "own_attribute") + + +def test_clone_buggy(): + # Check that clone raises an error on buggy estimators. 
+ buggy = Buggy() + buggy.a = 2 + with pytest.raises(RuntimeError): + clone(buggy) + + no_estimator = NoEstimator() + with pytest.raises(TypeError): + clone(no_estimator) + + varg_est = VargEstimator() + with pytest.raises(RuntimeError): + clone(varg_est) + + est = ModifyInitParams() + with pytest.raises(RuntimeError): + clone(est) + + +def test_clone_empty_array(): + # Regression test for cloning estimators with empty arrays + clf = MyEstimator(empty=np.array([])) + clf2 = clone(clf) + assert_array_equal(clf.empty, clf2.empty) + + clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]]))) + clf2 = clone(clf) + assert_array_equal(clf.empty.data, clf2.empty.data) + + +def test_clone_nan(): + # Regression test for cloning estimators with default parameter as np.nan + clf = MyEstimator(empty=np.nan) + clf2 = clone(clf) + + assert clf.empty is clf2.empty + + +def test_clone_dict(): + # test that clone creates a clone of a dict + orig = {"a": MyEstimator()} + cloned = clone(orig) + assert orig["a"] is not cloned["a"] + + +def test_clone_sparse_matrices(): + sparse_matrix_classes = [ + cls + for name in dir(sp) + if name.endswith("_matrix") and type(cls := getattr(sp, name)) is type + ] + + for cls in sparse_matrix_classes: + sparse_matrix = cls(np.eye(5)) + clf = MyEstimator(empty=sparse_matrix) + clf_cloned = clone(clf) + assert clf.empty.__class__ is clf_cloned.empty.__class__ + assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray()) + + +def test_clone_estimator_types(): + # Check that clone works for parameters that are types rather than + # instances + clf = MyEstimator(empty=MyEstimator) + clf2 = clone(clf) + + assert clf.empty is clf2.empty + + +def test_clone_class_rather_than_instance(): + # Check that clone raises expected error message when + # cloning class rather than instance + msg = "You should provide an instance of scikit-learn estimator" + with pytest.raises(TypeError, match=msg): + clone(MyEstimator) + + +def test_repr(): + # Smoke test the repr of the base estimator. + my_estimator = MyEstimator() + repr(my_estimator) + test = T(K(), K()) + assert repr(test) == "T(a=K(), b=K())" + + some_est = T(a=["long_params"] * 1000) + assert len(repr(some_est)) == 485 + + +def test_str(): + # Smoke test the str of the base estimator + my_estimator = MyEstimator() + str(my_estimator) + + +def test_get_params(): + test = T(K(), K) + + assert "a__d" in test.get_params(deep=True) + assert "a__d" not in test.get_params(deep=False) + + test.set_params(a__d=2) + assert test.a.d == 2 + + with pytest.raises(ValueError): + test.set_params(a__a=2) + + +def test_is_classifier(): + svc = SVC() + assert is_classifier(svc) + assert is_classifier(GridSearchCV(svc, {"C": [0.1, 1]})) + assert is_classifier(Pipeline([("svc", svc)])) + assert is_classifier(Pipeline([("svc_cv", GridSearchCV(svc, {"C": [0.1, 1]}))])) + + +def test_set_params(): + # test nested estimator parameter setting + clf = Pipeline([("svc", SVC())]) + + # non-existing parameter in svc + with pytest.raises(ValueError): + clf.set_params(svc__stupid_param=True) + + # non-existing parameter of pipeline + with pytest.raises(ValueError): + clf.set_params(svm__stupid_param=True) + + # we don't currently catch if the things in pipeline are estimators + # bad_pipeline = Pipeline([("bad", NoEstimator())]) + # assert_raises(AttributeError, bad_pipeline.set_params, + # bad__stupid_param=True) + + +def test_set_params_passes_all_parameters(): + # Make sure all parameters are passed together to set_params + # of nested estimator. 
Regression test for #9944 + + class TestDecisionTree(DecisionTreeClassifier): + def set_params(self, **kwargs): + super().set_params(**kwargs) + # expected_kwargs is in test scope + assert kwargs == expected_kwargs + return self + + expected_kwargs = {"max_depth": 5, "min_samples_leaf": 2} + for est in [ + Pipeline([("estimator", TestDecisionTree())]), + GridSearchCV(TestDecisionTree(), {}), + ]: + est.set_params(estimator__max_depth=5, estimator__min_samples_leaf=2) + + +def test_set_params_updates_valid_params(): + # Check that set_params tries to set SVC().C, not + # DecisionTreeClassifier().C + gscv = GridSearchCV(DecisionTreeClassifier(), {}) + gscv.set_params(estimator=SVC(), estimator__C=42.0) + assert gscv.estimator.C == 42.0 + + +@pytest.mark.parametrize( + "tree,dataset", + [ + ( + DecisionTreeClassifier(max_depth=2, random_state=0), + datasets.make_classification(random_state=0), + ), + ( + DecisionTreeRegressor(max_depth=2, random_state=0), + datasets.make_regression(random_state=0), + ), + ], +) +def test_score_sample_weight(tree, dataset): + rng = np.random.RandomState(0) + # check that the score with and without sample weights are different + X, y = dataset + + tree.fit(X, y) + # generate random sample weights + sample_weight = rng.randint(1, 10, size=len(y)) + score_unweighted = tree.score(X, y) + score_weighted = tree.score(X, y, sample_weight=sample_weight) + msg = "Unweighted and weighted scores are unexpectedly equal" + assert score_unweighted != score_weighted, msg + + +def test_clone_pandas_dataframe(): + class DummyEstimator(TransformerMixin, BaseEstimator): + """This is a dummy class for generating numerical features + + This feature extractor extracts numerical features from pandas data + frame. + + Parameters + ---------- + + df: pandas data frame + The pandas data frame parameter. 
+ + Notes + ----- + """ + + def __init__(self, df=None, scalar_param=1): + self.df = df + self.scalar_param = scalar_param + + def fit(self, X, y=None): + pass + + def transform(self, X): + pass + + # build and clone estimator + d = np.arange(10) + df = MockDataFrame(d) + e = DummyEstimator(df, scalar_param=1) + cloned_e = clone(e) + + # the test + assert (e.df == cloned_e.df).values.all() + assert e.scalar_param == cloned_e.scalar_param + + +def test_clone_protocol(): + """Checks that clone works with `__sklearn_clone__` protocol.""" + + class FrozenEstimator(BaseEstimator): + def __init__(self, fitted_estimator): + self.fitted_estimator = fitted_estimator + + def __getattr__(self, name): + return getattr(self.fitted_estimator, name) + + def __sklearn_clone__(self): + return self + + def fit(self, *args, **kwargs): + return self + + def fit_transform(self, *args, **kwargs): + return self.fitted_estimator.transform(*args, **kwargs) + + X = np.array([[-1, -1], [-2, -1], [-3, -2]]) + pca = PCA().fit(X) + components = pca.components_ + + frozen_pca = FrozenEstimator(pca) + assert_allclose(frozen_pca.components_, components) + + # Calling PCA methods such as `get_feature_names_out` still works + assert_array_equal(frozen_pca.get_feature_names_out(), pca.get_feature_names_out()) + + # Fitting on a new data does not alter `components_` + X_new = np.asarray([[-1, 2], [3, 4], [1, 2]]) + frozen_pca.fit(X_new) + assert_allclose(frozen_pca.components_, components) + + # `fit_transform` does not alter state + frozen_pca.fit_transform(X_new) + assert_allclose(frozen_pca.components_, components) + + # Cloning estimator is a no-op + clone_frozen_pca = clone(frozen_pca) + assert clone_frozen_pca is frozen_pca + assert_allclose(clone_frozen_pca.components_, components) + + +def test_pickle_version_warning_is_not_raised_with_matching_version(): + iris = datasets.load_iris() + tree = DecisionTreeClassifier().fit(iris.data, iris.target) + tree_pickle = pickle.dumps(tree) + assert b"_sklearn_version" in tree_pickle + tree_restored = assert_no_warnings(pickle.loads, tree_pickle) + + # test that we can predict with the restored decision tree classifier + score_of_original = tree.score(iris.data, iris.target) + score_of_restored = tree_restored.score(iris.data, iris.target) + assert score_of_original == score_of_restored + + +class TreeBadVersion(DecisionTreeClassifier): + def __getstate__(self): + return dict(self.__dict__.items(), _sklearn_version="something") + + +pickle_error_message = ( + "Trying to unpickle estimator {estimator} from " + "version {old_version} when using version " + "{current_version}. This might " + "lead to breaking code or invalid results. " + "Use at your own risk." 
+) + + +def test_pickle_version_warning_is_issued_upon_different_version(): + iris = datasets.load_iris() + tree = TreeBadVersion().fit(iris.data, iris.target) + tree_pickle_other = pickle.dumps(tree) + message = pickle_error_message.format( + estimator="TreeBadVersion", + old_version="something", + current_version=sklearn.__version__, + ) + with pytest.warns(UserWarning, match=message) as warning_record: + pickle.loads(tree_pickle_other) + + message = warning_record.list[0].message + assert isinstance(message, InconsistentVersionWarning) + assert message.estimator_name == "TreeBadVersion" + assert message.original_sklearn_version == "something" + assert message.current_sklearn_version == sklearn.__version__ + + +class TreeNoVersion(DecisionTreeClassifier): + def __getstate__(self): + return self.__dict__ + + +def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle(): + iris = datasets.load_iris() + # TreeNoVersion has no getstate, like pre-0.18 + tree = TreeNoVersion().fit(iris.data, iris.target) + + tree_pickle_noversion = pickle.dumps(tree) + assert b"_sklearn_version" not in tree_pickle_noversion + message = pickle_error_message.format( + estimator="TreeNoVersion", + old_version="pre-0.18", + current_version=sklearn.__version__, + ) + # check we got the warning about using pre-0.18 pickle + with pytest.warns(UserWarning, match=message): + pickle.loads(tree_pickle_noversion) + + +def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator(): + iris = datasets.load_iris() + tree = TreeNoVersion().fit(iris.data, iris.target) + tree_pickle_noversion = pickle.dumps(tree) + try: + module_backup = TreeNoVersion.__module__ + TreeNoVersion.__module__ = "notsklearn" + assert_no_warnings(pickle.loads, tree_pickle_noversion) + finally: + TreeNoVersion.__module__ = module_backup + + +class DontPickleAttributeMixin: + def __getstate__(self): + data = self.__dict__.copy() + data["_attribute_not_pickled"] = None + return data + + def __setstate__(self, state): + state["_restored"] = True + self.__dict__.update(state) + + +class MultiInheritanceEstimator(DontPickleAttributeMixin, BaseEstimator): + def __init__(self, attribute_pickled=5): + self.attribute_pickled = attribute_pickled + self._attribute_not_pickled = None + + +def test_pickling_when_getstate_is_overwritten_by_mixin(): + estimator = MultiInheritanceEstimator() + estimator._attribute_not_pickled = "this attribute should not be pickled" + + serialized = pickle.dumps(estimator) + estimator_restored = pickle.loads(serialized) + assert estimator_restored.attribute_pickled == 5 + assert estimator_restored._attribute_not_pickled is None + assert estimator_restored._restored + + +def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn(): + try: + estimator = MultiInheritanceEstimator() + text = "this attribute should not be pickled" + estimator._attribute_not_pickled = text + old_mod = type(estimator).__module__ + type(estimator).__module__ = "notsklearn" + + serialized = estimator.__getstate__() + assert serialized == {"_attribute_not_pickled": None, "attribute_pickled": 5} + + serialized["attribute_pickled"] = 4 + estimator.__setstate__(serialized) + assert estimator.attribute_pickled == 4 + assert estimator._restored + finally: + type(estimator).__module__ = old_mod + + +class SingleInheritanceEstimator(BaseEstimator): + def __init__(self, attribute_pickled=5): + self.attribute_pickled = attribute_pickled + self._attribute_not_pickled = None + + def __getstate__(self): + data = self.__dict__.copy() 
+ data["_attribute_not_pickled"] = None + return data + + +@ignore_warnings(category=(UserWarning)) +def test_pickling_works_when_getstate_is_overwritten_in_the_child_class(): + estimator = SingleInheritanceEstimator() + estimator._attribute_not_pickled = "this attribute should not be pickled" + + serialized = pickle.dumps(estimator) + estimator_restored = pickle.loads(serialized) + assert estimator_restored.attribute_pickled == 5 + assert estimator_restored._attribute_not_pickled is None + + +def test_tag_inheritance(): + # test that changing tags by inheritance is not allowed + + nan_tag_est = NaNTag() + no_nan_tag_est = NoNaNTag() + assert nan_tag_est._get_tags()["allow_nan"] + assert not no_nan_tag_est._get_tags()["allow_nan"] + + redefine_tags_est = OverrideTag() + assert not redefine_tags_est._get_tags()["allow_nan"] + + diamond_tag_est = DiamondOverwriteTag() + assert diamond_tag_est._get_tags()["allow_nan"] + + inherit_diamond_tag_est = InheritDiamondOverwriteTag() + assert inherit_diamond_tag_est._get_tags()["allow_nan"] + + +def test_raises_on_get_params_non_attribute(): + class MyEstimator(BaseEstimator): + def __init__(self, param=5): + pass + + def fit(self, X, y=None): + return self + + est = MyEstimator() + msg = "'MyEstimator' object has no attribute 'param'" + + with pytest.raises(AttributeError, match=msg): + est.get_params() + + +def test_repr_mimebundle_(): + # Checks the display configuration flag controls the json output + tree = DecisionTreeClassifier() + output = tree._repr_mimebundle_() + assert "text/plain" in output + assert "text/html" in output + + with config_context(display="text"): + output = tree._repr_mimebundle_() + assert "text/plain" in output + assert "text/html" not in output + + +def test_repr_html_wraps(): + # Checks the display configuration flag controls the html output + tree = DecisionTreeClassifier() + + output = tree._repr_html_() + assert "