diff --git a/ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..68051d9fe82b6e9823bcc7a5fcf91365cf0e1437
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa9f765aa7814461dbecc370b8515288885948e8afbbbbe4b565514bee1d1481
+size 16778396
diff --git a/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..976542ed1b3eeeccd0388d42f4903f4755f8d1e1
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41075e102137caa1e8745c571f82083723c462bf9ff1249ca75595f67f475897
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2d20cba6af84026c8682010236fdc86c90e09fb7
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48f20aad6c18db2d99498dd71e920c8bb03025779589319272f087c6248d2453
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6668db4b38e7ab70acd6abc1806b5d6b12844a20
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/23.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e31ca62821543e3cbbac929cd9187fa3c82f07a9a41942a3f2dd8698ddc209eb
+size 33555533
diff --git a/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ab3268d92ecdaa8c8f1083cc716d1ebed9e3d79b
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4534d085bc382985eb6cd40e84a61a5079e049be0ea2c05e40cd890f0eda71cd
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8666ec3ac3f8dd8f16f2c13401e7ad6eaaa62d0a
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef8be073710cc5ec015a0dcd6891e7ed954253af39e802e1844bae546db26dc6
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1961a5e4a62666d9bd2dbf8eceef4f37c5a5410b
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c82ab44226b2005a8b55040ac6cac7a56f4cbb9344f4a332697041b457d67478
+size 33555533
diff --git a/ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..348f8852e84685b8b4a9aa650fd4bcc0ec4796da
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8543fd9fe7fff423557d08ddedc20c96ddda4fd6e4db89e9f4848bf7342e1898
+size 9387
diff --git a/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1a14066b31affab2e7370eef8998f9dc5f657d82
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8621b78afe936756a9952fa31d3ce1f3f0cc64ac84d1e0b4f3ac2ba23f89fee6
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f4ccdf73ab75949d794c5fc529e9d4b08bef148b
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3db1a383ebabc03297f2d5f8d907b1185c656124131e1fd7fdb34465d6decddc
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..636e34e76daffac994481abf8e9d1dacc665fea8
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/25.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0a3307676c70e489cc33ee5db5e70a3ea50c1c3be1e49668a31a330c32f1811
+size 33555533
diff --git a/ckpts/universal/global_step40/zero/4.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/4.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bd43b70f1137ae31413d1639d0d3cf871870afcf
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/4.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b77231c886bbd3f915b506fcb52525d543f3bbf983ec097d70106ecc3cb270a3
+size 16778396
diff --git a/ckpts/universal/global_step40/zero/4.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/4.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9f4b52e1e0577f7e63964e9527e9f081ec8a562d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/4.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13ce3d24964147d1243e591e91c30aa056e0015a6b6d43de7a5678538af3a9fa
+size 16778411
diff --git a/ckpts/universal/global_step40/zero/4.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/4.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cb2f1c12bc0616b036b409fefb4616bc83e1279e
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/4.attention.dense.weight/fp32.pt
@@ -0,0 +1,3
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:212a1c16128c00a974a895b7ce9ac104655436f387808b79dfe1154da1d5ed19 +size 16778317 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..6ce1a5d3e3d0c9b1f993ca98cbc05f639e47bb14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_arff_parser.py @@ -0,0 +1,542 @@ +"""Implementation of ARFF parsers: via LIAC-ARFF and pandas.""" +import itertools +import re +from collections import OrderedDict +from collections.abc import Generator +from typing import List + +import numpy as np +import scipy as sp + +from ..externals import _arff +from ..externals._arff import ArffSparseDataType +from ..utils import ( + _chunk_generator, + check_pandas_support, + get_chunk_n_rows, +) +from ..utils.fixes import pd_fillna + + +def _split_sparse_columns( + arff_data: ArffSparseDataType, include_columns: List +) -> ArffSparseDataType: + """Obtains several columns from sparse ARFF representation. Additionally, + the column indices are re-labelled, given the columns that are not + included. (e.g., when including [1, 2, 3], the columns will be relabelled + to [0, 1, 2]). + + Parameters + ---------- + arff_data : tuple + A tuple of three lists of equal size; first list indicating the value, + second the x coordinate and the third the y coordinate. + + include_columns : list + A list of columns to include. + + Returns + ------- + arff_data_new : tuple + Subset of arff data with only the include columns indicated by the + include_columns argument. + """ + arff_data_new: ArffSparseDataType = (list(), list(), list()) + reindexed_columns = { + column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) + } + for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): + if col_idx in include_columns: + arff_data_new[0].append(val) + arff_data_new[1].append(row_idx) + arff_data_new[2].append(reindexed_columns[col_idx]) + return arff_data_new + + +def _sparse_data_to_array( + arff_data: ArffSparseDataType, include_columns: List +) -> np.ndarray: + # turns the sparse data back into an array (can't use toarray() function, + # as this does only work on numeric data) + num_obs = max(arff_data[1]) + 1 + y_shape = (num_obs, len(include_columns)) + reindexed_columns = { + column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) + } + # TODO: improve for efficiency + y = np.empty(y_shape, dtype=np.float64) + for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): + if col_idx in include_columns: + y[row_idx, reindexed_columns[col_idx]] = val + return y + + +def _post_process_frame(frame, feature_names, target_names): + """Post process a dataframe to select the desired columns in `X` and `y`. + + Parameters + ---------- + frame : dataframe + The dataframe to split into `X` and `y`. + + feature_names : list of str + The list of feature names to populate `X`. + + target_names : list of str + The list of target names to populate `y`. + + Returns + ------- + X : dataframe + The dataframe containing the features. + + y : {series, dataframe} or None + The series or dataframe containing the target. 
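# A minimal sketch (not from the library) of how the sparse ARFF helpers defined
# above behave, using a tiny hand-built COO triple of (values, row indices,
# column indices). The numbers are made up purely for illustration.
values, rows, cols = [1.0, 2.0, 3.0, 4.0], [0, 0, 1, 1], [0, 2, 1, 2]
arff_data = (values, rows, cols)

# Keep only columns 1 and 2; in the result they are relabelled to 0 and 1.
subset = _split_sparse_columns(arff_data, include_columns=[1, 2])
# subset == ([2.0, 3.0, 4.0], [0, 1, 1], [1, 0, 1])

# The same selection can be densified; entries that are never assigned keep the
# arbitrary contents left by np.empty.
dense = _sparse_data_to_array(arff_data, include_columns=[1, 2])  # shape (2, 2)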
+ """ + X = frame[feature_names] + if len(target_names) >= 2: + y = frame[target_names] + elif len(target_names) == 1: + y = frame[target_names[0]] + else: + y = None + return X, y + + +def _liac_arff_parser( + gzip_file, + output_arrays_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + shape=None, +): + """ARFF parser using the LIAC-ARFF library coded purely in Python. + + This parser is quite slow but consumes a generator. Currently it is needed + to parse sparse datasets. For dense datasets, it is recommended to instead + use the pandas-based parser, although it does not always handles the + dtypes exactly the same. + + Parameters + ---------- + gzip_file : GzipFile instance + The file compressed to be read. + + output_arrays_type : {"numpy", "sparse", "pandas"} + The type of the arrays that will be returned. The possibilities ara: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + A list of the feature names to be selected. + + target_names_to_select : list of str + A list of the target names to be selected. + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + + def _io_to_generator(gzip_file): + for line in gzip_file: + yield line.decode("utf-8") + + stream = _io_to_generator(gzip_file) + + # find which type (dense or sparse) ARFF type we will have to deal with + return_type = _arff.COO if output_arrays_type == "sparse" else _arff.DENSE_GEN + # we should not let LIAC-ARFF to encode the nominal attributes with NumPy + # arrays to have only numerical values. + encode_nominal = not (output_arrays_type == "pandas") + arff_container = _arff.load( + stream, return_type=return_type, encode_nominal=encode_nominal + ) + columns_to_select = feature_names_to_select + target_names_to_select + + categories = { + name: cat + for name, cat in arff_container["attributes"] + if isinstance(cat, list) and name in columns_to_select + } + if output_arrays_type == "pandas": + pd = check_pandas_support("fetch_openml with as_frame=True") + + columns_info = OrderedDict(arff_container["attributes"]) + columns_names = list(columns_info.keys()) + + # calculate chunksize + first_row = next(arff_container["data"]) + first_df = pd.DataFrame([first_row], columns=columns_names, copy=False) + + row_bytes = first_df.memory_usage(deep=True).sum() + chunksize = get_chunk_n_rows(row_bytes) + + # read arff data with chunks + columns_to_keep = [col for col in columns_names if col in columns_to_select] + dfs = [first_df[columns_to_keep]] + for data in _chunk_generator(arff_container["data"], chunksize): + dfs.append( + pd.DataFrame(data, columns=columns_names, copy=False)[columns_to_keep] + ) + # dfs[0] contains only one row, which may not have enough data to infer to + # column's dtype. 
Here we use `dfs[1]` to configure the dtype in dfs[0] + if len(dfs) >= 2: + dfs[0] = dfs[0].astype(dfs[1].dtypes) + + # liac-arff parser does not depend on NumPy and uses None to represent + # missing values. To be consistent with the pandas parser, we replace + # None with np.nan. + frame = pd.concat(dfs, ignore_index=True) + frame = pd_fillna(pd, frame) + del dfs, first_df + + # cast the columns frame + dtypes = {} + for name in frame.columns: + column_dtype = openml_columns_info[name]["data_type"] + if column_dtype.lower() == "integer": + # Use a pandas extension array instead of np.int64 to be able + # to support missing values. + dtypes[name] = "Int64" + elif column_dtype.lower() == "nominal": + dtypes[name] = "category" + else: + dtypes[name] = frame.dtypes[name] + frame = frame.astype(dtypes) + + X, y = _post_process_frame( + frame, feature_names_to_select, target_names_to_select + ) + else: + arff_data = arff_container["data"] + + feature_indices_to_select = [ + int(openml_columns_info[col_name]["index"]) + for col_name in feature_names_to_select + ] + target_indices_to_select = [ + int(openml_columns_info[col_name]["index"]) + for col_name in target_names_to_select + ] + + if isinstance(arff_data, Generator): + if shape is None: + raise ValueError( + "shape must be provided when arr['data'] is a Generator" + ) + if shape[0] == -1: + count = -1 + else: + count = shape[0] * shape[1] + data = np.fromiter( + itertools.chain.from_iterable(arff_data), + dtype="float64", + count=count, + ) + data = data.reshape(*shape) + X = data[:, feature_indices_to_select] + y = data[:, target_indices_to_select] + elif isinstance(arff_data, tuple): + arff_data_X = _split_sparse_columns(arff_data, feature_indices_to_select) + num_obs = max(arff_data[1]) + 1 + X_shape = (num_obs, len(feature_indices_to_select)) + X = sp.sparse.coo_matrix( + (arff_data_X[0], (arff_data_X[1], arff_data_X[2])), + shape=X_shape, + dtype=np.float64, + ) + X = X.tocsr() + y = _sparse_data_to_array(arff_data, target_indices_to_select) + else: + # This should never happen + raise ValueError( + f"Unexpected type for data obtained from arff: {type(arff_data)}" + ) + + is_classification = { + col_name in categories for col_name in target_names_to_select + } + if not is_classification: + # No target + pass + elif all(is_classification): + y = np.hstack( + [ + np.take( + np.asarray(categories.pop(col_name), dtype="O"), + y[:, i : i + 1].astype(int, copy=False), + ) + for i, col_name in enumerate(target_names_to_select) + ] + ) + elif any(is_classification): + raise ValueError( + "Mix of nominal and non-nominal targets is not currently supported" + ) + + # reshape y back to 1-D array, if there is only 1 target column; + # back to None if there are not target columns + if y.shape[1] == 1: + y = y.reshape((-1,)) + elif y.shape[1] == 0: + y = None + + if output_arrays_type == "pandas": + return X, y, frame, None + return X, y, None, categories + + +def _pandas_arff_parser( + gzip_file, + output_arrays_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + read_csv_kwargs=None, +): + """ARFF parser using `pandas.read_csv`. + + This parser uses the metadata fetched directly from OpenML and skips the metadata + headers of ARFF file itself. The data is loaded as a CSV file. + + Parameters + ---------- + gzip_file : GzipFile instance + The GZip compressed file with the ARFF formatted payload. + + output_arrays_type : {"numpy", "sparse", "pandas"} + The type of the arrays that will be returned. 
The possibilities are: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + openml_columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + A list of the feature names to be selected to build `X`. + + target_names_to_select : list of str + A list of the target names to be selected to build `y`. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite + the default options. + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + import pandas as pd + + # read the file until the data section to skip the ARFF metadata headers + for line in gzip_file: + if line.decode("utf-8").lower().startswith("@data"): + break + + dtypes = {} + for name in openml_columns_info: + column_dtype = openml_columns_info[name]["data_type"] + if column_dtype.lower() == "integer": + # Use Int64 to infer missing values from data + # XXX: this line is not covered by our tests. Is this really needed? + dtypes[name] = "Int64" + elif column_dtype.lower() == "nominal": + dtypes[name] = "category" + # since we will not pass `names` when reading the ARFF file, we need to translate + # `dtypes` from column names to column indices to pass to `pandas.read_csv` + dtypes_positional = { + col_idx: dtypes[name] + for col_idx, name in enumerate(openml_columns_info) + if name in dtypes + } + + default_read_csv_kwargs = { + "header": None, + "index_col": False, # always force pandas to not use the first column as index + "na_values": ["?"], # missing values are represented by `?` + "keep_default_na": False, # only `?` is a missing value given the ARFF specs + "comment": "%", # skip line starting by `%` since they are comments + "quotechar": '"', # delimiter to use for quoted strings + "skipinitialspace": True, # skip spaces after delimiter to follow ARFF specs + "escapechar": "\\", + "dtype": dtypes_positional, + } + read_csv_kwargs = {**default_read_csv_kwargs, **(read_csv_kwargs or {})} + frame = pd.read_csv(gzip_file, **read_csv_kwargs) + try: + # Setting the columns while reading the file will select the N first columns + # and not raise a ParserError. Instead, we set the columns after reading the + # file and raise a ParserError if the number of columns does not match the + # number of columns in the metadata given by OpenML. + frame.columns = [name for name in openml_columns_info] + except ValueError as exc: + raise pd.errors.ParserError( + "The number of columns provided by OpenML does not match the number of " + "columns inferred by pandas when reading the file." + ) from exc + + columns_to_select = feature_names_to_select + target_names_to_select + columns_to_keep = [col for col in frame.columns if col in columns_to_select] + frame = frame[columns_to_keep] + + # `pd.read_csv` automatically handles double quotes for quoting non-numeric + # CSV cell values. 
Contrary to LIAC-ARFF, `pd.read_csv` cannot be configured to + # consider either single quotes and double quotes as valid quoting chars at + # the same time since this case does not occur in regular (non-ARFF) CSV files. + # To mimic the behavior of LIAC-ARFF parser, we manually strip single quotes + # on categories as a post-processing steps if needed. + # + # Note however that we intentionally do not attempt to do this kind of manual + # post-processing of (non-categorical) string-typed columns because we cannot + # resolve the ambiguity of the case of CSV cell with nesting quoting such as + # `"'some string value'"` with pandas. + single_quote_pattern = re.compile(r"^'(?P.*)'$") + + def strip_single_quotes(input_string): + match = re.search(single_quote_pattern, input_string) + if match is None: + return input_string + + return match.group("contents") + + categorical_columns = [ + name + for name, dtype in frame.dtypes.items() + if isinstance(dtype, pd.CategoricalDtype) + ] + for col in categorical_columns: + frame[col] = frame[col].cat.rename_categories(strip_single_quotes) + + X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select) + + if output_arrays_type == "pandas": + return X, y, frame, None + else: + X, y = X.to_numpy(), y.to_numpy() + + categories = { + name: dtype.categories.tolist() + for name, dtype in frame.dtypes.items() + if isinstance(dtype, pd.CategoricalDtype) + } + return X, y, None, categories + + +def load_arff_from_gzip_file( + gzip_file, + parser, + output_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + shape=None, + read_csv_kwargs=None, +): + """Load a compressed ARFF file using a given parser. + + Parameters + ---------- + gzip_file : GzipFile instance + The file compressed to be read. + + parser : {"pandas", "liac-arff"} + The parser used to parse the ARFF file. "pandas" is recommended + but only supports loading dense datasets. + + output_type : {"numpy", "sparse", "pandas"} + The type of the arrays that will be returned. The possibilities ara: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + openml_columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + A list of the feature names to be selected. + + target_names_to_select : list of str + A list of the target names to be selected. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite + the default options. + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + if parser == "liac-arff": + return _liac_arff_parser( + gzip_file, + output_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + shape, + ) + elif parser == "pandas": + return _pandas_arff_parser( + gzip_file, + output_type, + openml_columns_info, + feature_names_to_select, + target_names_to_select, + read_csv_kwargs, + ) + else: + raise ValueError( + f"Unknown parser: '{parser}'. 
Should be 'liac-arff' or 'pandas'." + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_base.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e055b47ab13a2edceb0027407519ab4fc0dc8766 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_base.py @@ -0,0 +1,1441 @@ +""" +Base IO code for all datasets +""" + +# Copyright (c) 2007 David Cournapeau +# 2010 Fabian Pedregosa +# 2010 Olivier Grisel +# License: BSD 3 clause +import csv +import gzip +import hashlib +import os +import shutil +from collections import namedtuple +from importlib import resources +from numbers import Integral +from os import environ, listdir, makedirs +from os.path import expanduser, isdir, join, splitext +from pathlib import Path +from urllib.request import urlretrieve + +import numpy as np + +from ..preprocessing import scale +from ..utils import Bunch, check_pandas_support, check_random_state +from ..utils._param_validation import Interval, StrOptions, validate_params + +DATA_MODULE = "sklearn.datasets.data" +DESCR_MODULE = "sklearn.datasets.descr" +IMAGES_MODULE = "sklearn.datasets.images" + +RemoteFileMetadata = namedtuple("RemoteFileMetadata", ["filename", "url", "checksum"]) + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + }, + prefer_skip_nested_validation=True, +) +def get_data_home(data_home=None) -> str: + """Return the path of the scikit-learn data directory. + + This folder is used by some large dataset loaders to avoid downloading the + data several times. + + By default the data directory is set to a folder named 'scikit_learn_data' in the + user home folder. + + Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment + variable or programmatically by giving an explicit folder path. The '~' + symbol is expanded to the user home folder. + + If the folder does not already exist, it is automatically created. + + Parameters + ---------- + data_home : str or path-like, default=None + The path to scikit-learn data directory. If `None`, the default path + is `~/scikit_learn_data`. + + Returns + ------- + data_home: str + The path to scikit-learn data directory. + """ + if data_home is None: + data_home = environ.get("SCIKIT_LEARN_DATA", join("~", "scikit_learn_data")) + data_home = expanduser(data_home) + makedirs(data_home, exist_ok=True) + return data_home + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + }, + prefer_skip_nested_validation=True, +) +def clear_data_home(data_home=None): + """Delete all the content of the data home cache. + + Parameters + ---------- + data_home : str or path-like, default=None + The path to scikit-learn data directory. If `None`, the default path + is `~/scikit_learn_data`. 
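# A minimal sketch (paths are assumed, not from the library) of the lookup order
# implemented by get_data_home() above: explicit argument first, then the
# SCIKIT_LEARN_DATA environment variable, then ~/scikit_learn_data.
import os

print(get_data_home())                      # e.g. /home/user/scikit_learn_data
print(get_data_home("~/my_sklearn_cache"))  # '~' is expanded, folder is created

os.environ["SCIKIT_LEARN_DATA"] = "/tmp/sklearn_cache"  # hypothetical cache dir
print(get_data_home())                      # now /tmp/sklearn_cache
# clear_data_home() deletes the whole cache directory again.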
+ + Examples + -------- + >>> from sklearn.datasets import clear_data_home + >>> clear_data_home() # doctest: +SKIP + """ + data_home = get_data_home(data_home) + shutil.rmtree(data_home) + + +def _convert_data_dataframe( + caller_name, data, target, feature_names, target_names, sparse_data=False +): + pd = check_pandas_support("{} with as_frame=True".format(caller_name)) + if not sparse_data: + data_df = pd.DataFrame(data, columns=feature_names, copy=False) + else: + data_df = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names) + + target_df = pd.DataFrame(target, columns=target_names) + combined_df = pd.concat([data_df, target_df], axis=1) + X = combined_df[feature_names] + y = combined_df[target_names] + if y.shape[1] == 1: + y = y.iloc[:, 0] + return combined_df, X, y + + +@validate_params( + { + "container_path": [str, os.PathLike], + "description": [str, None], + "categories": [list, None], + "load_content": ["boolean"], + "shuffle": ["boolean"], + "encoding": [str, None], + "decode_error": [StrOptions({"strict", "ignore", "replace"})], + "random_state": ["random_state"], + "allowed_extensions": [list, None], + }, + prefer_skip_nested_validation=True, +) +def load_files( + container_path, + *, + description=None, + categories=None, + load_content=True, + shuffle=True, + encoding=None, + decode_error="strict", + random_state=0, + allowed_extensions=None, +): + """Load text files with categories as subfolder names. + + Individual samples are assumed to be files stored a two levels folder + structure such as the following: + + container_folder/ + category_1_folder/ + file_1.txt + file_2.txt + ... + file_42.txt + category_2_folder/ + file_43.txt + file_44.txt + ... + + The folder names are used as supervised signal label names. The individual + file names are not important. + + This function does not try to extract features into a numpy array or scipy + sparse matrix. In addition, if load_content is false it does not try to + load the files in memory. + + To use text files in a scikit-learn classification or clustering algorithm, + you will need to use the :mod:`~sklearn.feature_extraction.text` module to + build a feature extraction transformer that suits your problem. + + If you set load_content=True, you should also specify the encoding of the + text using the 'encoding' parameter. For many modern text files, 'utf-8' + will be the correct encoding. If you leave encoding equal to None, then the + content will be made of bytes instead of Unicode, and you will not be able + to use most functions in :mod:`~sklearn.feature_extraction.text`. + + Similar feature extractors should be built for other kind of unstructured + data input such as images, audio, video, ... + + If you want files with a specific file extension (e.g. `.txt`) then you + can pass a list of those file extensions to `allowed_extensions`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + container_path : str + Path to the main folder holding one subfolder per category. + + description : str, default=None + A paragraph describing the characteristic of the dataset: its source, + reference, etc. + + categories : list of str, default=None + If None (default), load all the categories. If not None, list of + category names to load (other categories ignored). + + load_content : bool, default=True + Whether to load or not the content of the different files. If true a + 'data' attribute containing the text information is present in the data + structure returned. 
If not, a filenames attribute gives the path to the + files. + + shuffle : bool, default=True + Whether or not to shuffle the data: might be important for models that + make the assumption that the samples are independent and identically + distributed (i.i.d.), such as stochastic gradient descent. + + encoding : str, default=None + If None, do not try to decode the content of the files (e.g. for images + or other non-text content). If not None, encoding to use to decode text + files to Unicode if load_content is True. + + decode_error : {'strict', 'ignore', 'replace'}, default='strict' + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. Passed as keyword + argument 'errors' to bytes.decode. + + random_state : int, RandomState instance or None, default=0 + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + allowed_extensions : list of str, default=None + List of desired file extensions to filter the files to be loaded. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : list of str + Only present when `load_content=True`. + The raw text data to learn. + target : ndarray + The target labels (integer index). + target_names : list + The names of target classes. + DESCR : str + The full description of the dataset. + filenames: ndarray + The filenames holding the dataset. + + Examples + -------- + >>> from sklearn.datasets import load_files + >>> container_path = "./" + >>> load_files(container_path) # doctest: +SKIP + """ + + target = [] + target_names = [] + filenames = [] + + folders = [ + f for f in sorted(listdir(container_path)) if isdir(join(container_path, f)) + ] + + if categories is not None: + folders = [f for f in folders if f in categories] + + if allowed_extensions is not None: + allowed_extensions = frozenset(allowed_extensions) + + for label, folder in enumerate(folders): + target_names.append(folder) + folder_path = join(container_path, folder) + files = sorted(listdir(folder_path)) + if allowed_extensions is not None: + documents = [ + join(folder_path, file) + for file in files + if os.path.splitext(file)[1] in allowed_extensions + ] + else: + documents = [join(folder_path, file) for file in files] + target.extend(len(documents) * [label]) + filenames.extend(documents) + + # convert to array for fancy indexing + filenames = np.array(filenames) + target = np.array(target) + + if shuffle: + random_state = check_random_state(random_state) + indices = np.arange(filenames.shape[0]) + random_state.shuffle(indices) + filenames = filenames[indices] + target = target[indices] + + if load_content: + data = [] + for filename in filenames: + data.append(Path(filename).read_bytes()) + if encoding is not None: + data = [d.decode(encoding, decode_error) for d in data] + return Bunch( + data=data, + filenames=filenames, + target_names=target_names, + target=target, + DESCR=description, + ) + + return Bunch( + filenames=filenames, target_names=target_names, target=target, DESCR=description + ) + + +def load_csv_data( + data_file_name, + *, + data_module=DATA_MODULE, + descr_file_name=None, + descr_module=DESCR_MODULE, + encoding="utf-8", +): + """Loads `data_file_name` from `data_module with `importlib.resources`. + + Parameters + ---------- + data_file_name : str + Name of csv file to be loaded from `data_module/data_file_name`. 
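# A hedged usage sketch for load_files() above. The folder name is hypothetical
# and must follow the container/category layout shown in the docstring; the
# CountVectorizer step is one possible follow-up, since the docstring points to
# sklearn.feature_extraction.text for turning the raw text into features.
from sklearn.feature_extraction.text import CountVectorizer

bunch = load_files(
    "container_folder",           # hypothetical path, one subfolder per class
    encoding="utf-8",             # decode bytes so bunch.data is a list of str
    allowed_extensions=[".txt"],  # keep only text files
    random_state=0,
)
X = CountVectorizer().fit_transform(bunch.data)  # sparse document-term matrix
y = bunch.target                                 # integer label per subfolder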
+ For example `'wine_data.csv'`. + + data_module : str or module, default='sklearn.datasets.data' + Module where data lives. The default is `'sklearn.datasets.data'`. + + descr_file_name : str, default=None + Name of rst file to be loaded from `descr_module/descr_file_name`. + For example `'wine_data.rst'`. See also :func:`load_descr`. + If not None, also returns the corresponding description of + the dataset. + + descr_module : str or module, default='sklearn.datasets.descr' + Module where `descr_file_name` lives. See also :func:`load_descr`. + The default is `'sklearn.datasets.descr'`. + + Returns + ------- + data : ndarray of shape (n_samples, n_features) + A 2D array with each row representing one sample and each column + representing the features of a given sample. + + target : ndarry of shape (n_samples,) + A 1D array holding target variables for all the samples in `data`. + For example target[0] is the target variable for data[0]. + + target_names : ndarry of shape (n_samples,) + A 1D array containing the names of the classifications. For example + target_names[0] is the name of the target[0] class. + + descr : str, optional + Description of the dataset (the content of `descr_file_name`). + Only returned if `descr_file_name` is not None. + + encoding : str, optional + Text encoding of the CSV file. + + .. versionadded:: 1.4 + """ + data_path = resources.files(data_module) / data_file_name + with data_path.open("r", encoding="utf-8") as csv_file: + data_file = csv.reader(csv_file) + temp = next(data_file) + n_samples = int(temp[0]) + n_features = int(temp[1]) + target_names = np.array(temp[2:]) + data = np.empty((n_samples, n_features)) + target = np.empty((n_samples,), dtype=int) + + for i, ir in enumerate(data_file): + data[i] = np.asarray(ir[:-1], dtype=np.float64) + target[i] = np.asarray(ir[-1], dtype=int) + + if descr_file_name is None: + return data, target, target_names + else: + assert descr_module is not None + descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name) + return data, target, target_names, descr + + +def load_gzip_compressed_csv_data( + data_file_name, + *, + data_module=DATA_MODULE, + descr_file_name=None, + descr_module=DESCR_MODULE, + encoding="utf-8", + **kwargs, +): + """Loads gzip-compressed with `importlib.resources`. + + 1) Open resource file with `importlib.resources.open_binary` + 2) Decompress file obj with `gzip.open` + 3) Load decompressed data with `np.loadtxt` + + Parameters + ---------- + data_file_name : str + Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from + `data_module/data_file_name`. For example `'diabetes_data.csv.gz'`. + + data_module : str or module, default='sklearn.datasets.data' + Module where data lives. The default is `'sklearn.datasets.data'`. + + descr_file_name : str, default=None + Name of rst file to be loaded from `descr_module/descr_file_name`. + For example `'wine_data.rst'`. See also :func:`load_descr`. + If not None, also returns the corresponding description of + the dataset. + + descr_module : str or module, default='sklearn.datasets.descr' + Module where `descr_file_name` lives. See also :func:`load_descr`. + The default is `'sklearn.datasets.descr'`. + + encoding : str, default="utf-8" + Name of the encoding that the gzip-decompressed file will be + decoded with. The default is 'utf-8'. + + **kwargs : dict, optional + Keyword arguments to be passed to `np.loadtxt`; + e.g. delimiter=','. 
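# A short sketch of how the two CSV helpers above are driven by the public
# loaders later in this file (see load_wine and load_digits below). The extra
# description return value only appears when descr_file_name is passed.
data, target, target_names = load_csv_data(data_file_name="wine_data.csv")
data, target, target_names, fdescr = load_csv_data(
    data_file_name="wine_data.csv", descr_file_name="wine_data.rst"
)

# The gzip variant forwards keyword arguments such as delimiter to np.loadtxt.
data, fdescr = load_gzip_compressed_csv_data(
    data_file_name="digits.csv.gz", descr_file_name="digits.rst", delimiter=","
)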
+ + Returns + ------- + data : ndarray of shape (n_samples, n_features) + A 2D array with each row representing one sample and each column + representing the features and/or target of a given sample. + + descr : str, optional + Description of the dataset (the content of `descr_file_name`). + Only returned if `descr_file_name` is not None. + """ + data_path = resources.files(data_module) / data_file_name + with data_path.open("rb") as compressed_file: + compressed_file = gzip.open(compressed_file, mode="rt", encoding=encoding) + data = np.loadtxt(compressed_file, **kwargs) + + if descr_file_name is None: + return data + else: + assert descr_module is not None + descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name) + return data, descr + + +def load_descr(descr_file_name, *, descr_module=DESCR_MODULE, encoding="utf-8"): + """Load `descr_file_name` from `descr_module` with `importlib.resources`. + + Parameters + ---------- + descr_file_name : str, default=None + Name of rst file to be loaded from `descr_module/descr_file_name`. + For example `'wine_data.rst'`. See also :func:`load_descr`. + If not None, also returns the corresponding description of + the dataset. + + descr_module : str or module, default='sklearn.datasets.descr' + Module where `descr_file_name` lives. See also :func:`load_descr`. + The default is `'sklearn.datasets.descr'`. + + encoding : str, default="utf-8" + Name of the encoding that `descr_file_name` will be decoded with. + The default is 'utf-8'. + + .. versionadded:: 1.4 + + Returns + ------- + fdescr : str + Content of `descr_file_name`. + """ + path = resources.files(descr_module) / descr_file_name + return path.read_text(encoding=encoding) + + +@validate_params( + { + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def load_wine(*, return_X_y=False, as_frame=False): + """Load and return the wine dataset (classification). + + .. versionadded:: 0.18 + + The wine dataset is a classic and very easy multi-class classification + dataset. + + ================= ============== + Classes 3 + Samples per class [59,71,48] + Samples total 178 + Dimensionality 13 + Features real, positive + ================= ============== + + The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit + standard format from: + https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (178, 13) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, Series} of shape (178,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + target_names: list + The names of target classes. 
+ frame: DataFrame of shape (178, 14) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarrays by default. The first contains a 2D array of shape + (178, 13) with each row representing one sample and each column representing + the features. The second array of shape (178,) contains the target samples. + + Examples + -------- + Let's say you are interested in the samples 10, 80, and 140, and want to + know their class name. + + >>> from sklearn.datasets import load_wine + >>> data = load_wine() + >>> data.target[[10, 80, 140]] + array([0, 1, 2]) + >>> list(data.target_names) + ['class_0', 'class_1', 'class_2'] + """ + + data, target, target_names, fdescr = load_csv_data( + data_file_name="wine_data.csv", descr_file_name="wine_data.rst" + ) + + feature_names = [ + "alcohol", + "malic_acid", + "ash", + "alcalinity_of_ash", + "magnesium", + "total_phenols", + "flavanoids", + "nonflavanoid_phenols", + "proanthocyanins", + "color_intensity", + "hue", + "od280/od315_of_diluted_wines", + "proline", + ] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_wine", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + DESCR=fdescr, + feature_names=feature_names, + ) + + +@validate_params( + {"return_X_y": ["boolean"], "as_frame": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def load_iris(*, return_X_y=False, as_frame=False): + """Load and return the iris dataset (classification). + + The iris dataset is a classic and very easy multi-class classification + dataset. + + ================= ============== + Classes 3 + Samples per class 50 + Samples total 150 + Dimensionality 4 + Features real, positive + ================= ============== + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. See + below for more information about the `data` and `target` object. + + .. versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (150, 4) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, Series} of shape (150,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + target_names: list + The names of target classes. + frame: DataFrame of shape (150, 5) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + filename: str + The path to the location of the data. + + .. versionadded:: 0.20 + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. 
The first containing a 2D array of shape + (n_samples, n_features) with each row representing one sample and + each column representing the features. The second ndarray of shape + (n_samples,) containing the target samples. + + .. versionadded:: 0.18 + + Notes + ----- + .. versionchanged:: 0.20 + Fixed two wrong data points according to Fisher's paper. + The new version is the same as in R, but not as in the UCI + Machine Learning Repository. + + Examples + -------- + Let's say you are interested in the samples 10, 25, and 50, and want to + know their class name. + + >>> from sklearn.datasets import load_iris + >>> data = load_iris() + >>> data.target[[10, 25, 50]] + array([0, 0, 1]) + >>> list(data.target_names) + ['setosa', 'versicolor', 'virginica'] + + See :ref:`sphx_glr_auto_examples_datasets_plot_iris_dataset.py` for a more + detailed example of how to work with the iris dataset. + """ + data_file_name = "iris.csv" + data, target, target_names, fdescr = load_csv_data( + data_file_name=data_file_name, descr_file_name="iris.rst" + ) + + feature_names = [ + "sepal length (cm)", + "sepal width (cm)", + "petal length (cm)", + "petal width (cm)", + ] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_iris", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + DESCR=fdescr, + feature_names=feature_names, + filename=data_file_name, + data_module=DATA_MODULE, + ) + + +@validate_params( + {"return_X_y": ["boolean"], "as_frame": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def load_breast_cancer(*, return_X_y=False, as_frame=False): + """Load and return the breast cancer wisconsin dataset (classification). + + The breast cancer dataset is a classic and very easy binary classification + dataset. + + ================= ============== + Classes 2 + Samples per class 212(M),357(B) + Samples total 569 + Dimensionality 30 + Features real, positive + ================= ============== + + The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is + downloaded from: + https://archive.ics.uci.edu/dataset/17/breast+cancer+wisconsin+diagnostic + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (569, 30) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target : {ndarray, Series} of shape (569,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names : ndarray of shape (30,) + The names of the dataset columns. + target_names : ndarray of shape (2,) + The names of target classes. + frame : DataFrame of shape (569, 31) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. 
versionadded:: 0.23 + DESCR : str + The full description of the dataset. + filename : str + The path to the location of the data. + + .. versionadded:: 0.20 + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarrays by default. The first contains a 2D ndarray of + shape (569, 30) with each row representing one sample and each column + representing the features. The second ndarray of shape (569,) contains + the target samples. If `as_frame=True`, both arrays are pandas objects, + i.e. `X` a dataframe and `y` a series. + + .. versionadded:: 0.18 + + Examples + -------- + Let's say you are interested in the samples 10, 50, and 85, and want to + know their class name. + + >>> from sklearn.datasets import load_breast_cancer + >>> data = load_breast_cancer() + >>> data.target[[10, 50, 85]] + array([0, 1, 0]) + >>> list(data.target_names) + ['malignant', 'benign'] + """ + data_file_name = "breast_cancer.csv" + data, target, target_names, fdescr = load_csv_data( + data_file_name=data_file_name, descr_file_name="breast_cancer.rst" + ) + + feature_names = np.array( + [ + "mean radius", + "mean texture", + "mean perimeter", + "mean area", + "mean smoothness", + "mean compactness", + "mean concavity", + "mean concave points", + "mean symmetry", + "mean fractal dimension", + "radius error", + "texture error", + "perimeter error", + "area error", + "smoothness error", + "compactness error", + "concavity error", + "concave points error", + "symmetry error", + "fractal dimension error", + "worst radius", + "worst texture", + "worst perimeter", + "worst area", + "worst smoothness", + "worst compactness", + "worst concavity", + "worst concave points", + "worst symmetry", + "worst fractal dimension", + ] + ) + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_breast_cancer", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + DESCR=fdescr, + feature_names=feature_names, + filename=data_file_name, + data_module=DATA_MODULE, + ) + + +@validate_params( + { + "n_class": [Interval(Integral, 1, 10, closed="both")], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def load_digits(*, n_class=10, return_X_y=False, as_frame=False): + """Load and return the digits dataset (classification). + + Each datapoint is a 8x8 image of a digit. + + ================= ============== + Classes 10 + Samples per class ~180 + Samples total 1797 + Dimensionality 64 + Features integers 0-16 + ================= ============== + + This is a copy of the test set of the UCI ML hand-written digits datasets + https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_class : int, default=10 + The number of classes to return. Between 0 and 10. + + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. 
versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (1797, 64) + The flattened data matrix. If `as_frame=True`, `data` will be + a pandas DataFrame. + target: {ndarray, Series} of shape (1797,) + The classification target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + target_names: list + The names of target classes. + + .. versionadded:: 0.20 + + frame: DataFrame of shape (1797, 65) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + images: {ndarray} of shape (1797, 8, 8) + The raw image data. + DESCR: str + The full description of the dataset. + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarrays by default. The first contains a 2D ndarray of + shape (1797, 64) with each row representing one sample and each column + representing the features. The second ndarray of shape (1797) contains + the target samples. If `as_frame=True`, both arrays are pandas objects, + i.e. `X` a dataframe and `y` a series. + + .. versionadded:: 0.18 + + Examples + -------- + To load the data and visualize the images:: + + >>> from sklearn.datasets import load_digits + >>> digits = load_digits() + >>> print(digits.data.shape) + (1797, 64) + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.matshow(digits.images[0]) + <...> + >>> plt.show() + """ + + data, fdescr = load_gzip_compressed_csv_data( + data_file_name="digits.csv.gz", descr_file_name="digits.rst", delimiter="," + ) + + target = data[:, -1].astype(int, copy=False) + flat_data = data[:, :-1] + images = flat_data.view() + images.shape = (-1, 8, 8) + + if n_class < 10: + idx = target < n_class + flat_data, target = flat_data[idx], target[idx] + images = images[idx] + + feature_names = [ + "pixel_{}_{}".format(row_idx, col_idx) + for row_idx in range(8) + for col_idx in range(8) + ] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, flat_data, target = _convert_data_dataframe( + "load_digits", flat_data, target, feature_names, target_columns + ) + + if return_X_y: + return flat_data, target + + return Bunch( + data=flat_data, + target=target, + frame=frame, + feature_names=feature_names, + target_names=np.arange(10), + images=images, + DESCR=fdescr, + ) + + +@validate_params( + {"return_X_y": ["boolean"], "as_frame": ["boolean"], "scaled": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True): + """Load and return the diabetes dataset (regression). + + ============== ================== + Samples total 442 + Dimensionality 10 + Features real, -.2 < x < .2 + Targets integer 25 - 346 + ============== ================== + + .. note:: + The meaning of each feature (i.e. `feature_names`) might be unclear + (especially for `ltg`) as the documentation of the original dataset is + not explicit. We provide information that seems correct in regard with + the scientific literature in this field of research. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. 
versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + scaled : bool, default=True + If True, the feature variables are mean centered and scaled by the + standard deviation times the square root of `n_samples`. + If False, raw data is returned for the feature variables. + + .. versionadded:: 1.1 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (442, 10) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, Series} of shape (442,) + The regression target. If `as_frame=True`, `target` will be + a pandas Series. + feature_names: list + The names of the dataset columns. + frame: DataFrame of shape (442, 11) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + data_filename: str + The path to the location of the data. + target_filename: str + The path to the location of the target. + + (data, target) : tuple if ``return_X_y`` is True + Returns a tuple of two ndarray of shape (n_samples, n_features) + A 2D array with each row representing one sample and each column + representing the features and/or target of a given sample. + + .. versionadded:: 0.18 + + Examples + -------- + >>> from sklearn.datasets import load_diabetes + >>> diabetes = load_diabetes() + >>> diabetes.target[:3] + array([151., 75., 141.]) + >>> diabetes.data.shape + (442, 10) + """ + data_filename = "diabetes_data_raw.csv.gz" + target_filename = "diabetes_target.csv.gz" + data = load_gzip_compressed_csv_data(data_filename) + target = load_gzip_compressed_csv_data(target_filename) + + if scaled: + data = scale(data, copy=False) + data /= data.shape[0] ** 0.5 + + fdescr = load_descr("diabetes.rst") + + feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"] + + frame = None + target_columns = [ + "target", + ] + if as_frame: + frame, data, target = _convert_data_dataframe( + "load_diabetes", data, target, feature_names, target_columns + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + DESCR=fdescr, + feature_names=feature_names, + data_filename=data_filename, + target_filename=target_filename, + data_module=DATA_MODULE, + ) + + +@validate_params( + { + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def load_linnerud(*, return_X_y=False, as_frame=False): + """Load and return the physical exercise Linnerud dataset. + + This dataset is suitable for multi-output regression tasks. + + ============== ============================ + Samples total 20 + Dimensionality 3 (for both data and target) + Features integer + Targets integer + ============== ============================ + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. + See below for more information about the `data` and `target` object. + + .. 
versionadded:: 0.18 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string or categorical). The target is + a pandas DataFrame or Series depending on the number of target columns. + If `return_X_y` is True, then (`data`, `target`) will be pandas + DataFrames or Series as described below. + + .. versionadded:: 0.23 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (20, 3) + The data matrix. If `as_frame=True`, `data` will be a pandas + DataFrame. + target: {ndarray, dataframe} of shape (20, 3) + The regression targets. If `as_frame=True`, `target` will be + a pandas DataFrame. + feature_names: list + The names of the dataset columns. + target_names: list + The names of the target columns. + frame: DataFrame of shape (20, 6) + Only present when `as_frame=True`. DataFrame with `data` and + `target`. + + .. versionadded:: 0.23 + DESCR: str + The full description of the dataset. + data_filename: str + The path to the location of the data. + target_filename: str + The path to the location of the target. + + .. versionadded:: 0.20 + + (data, target) : tuple if ``return_X_y`` is True + Returns a tuple of two ndarrays or dataframe of shape + `(20, 3)`. Each row represents one sample and each column represents the + features in `X` and a target in `y` of a given sample. + + .. versionadded:: 0.18 + """ + data_filename = "linnerud_exercise.csv" + target_filename = "linnerud_physiological.csv" + + data_module_path = resources.files(DATA_MODULE) + # Read header and data + data_path = data_module_path / data_filename + with data_path.open("r", encoding="utf-8") as f: + header_exercise = f.readline().split() + f.seek(0) # reset file obj + data_exercise = np.loadtxt(f, skiprows=1) + + target_path = data_module_path / target_filename + with target_path.open("r", encoding="utf-8") as f: + header_physiological = f.readline().split() + f.seek(0) # reset file obj + data_physiological = np.loadtxt(f, skiprows=1) + + fdescr = load_descr("linnerud.rst") + + frame = None + if as_frame: + (frame, data_exercise, data_physiological) = _convert_data_dataframe( + "load_linnerud", + data_exercise, + data_physiological, + header_exercise, + header_physiological, + ) + if return_X_y: + return data_exercise, data_physiological + + return Bunch( + data=data_exercise, + feature_names=header_exercise, + target=data_physiological, + target_names=header_physiological, + frame=frame, + DESCR=fdescr, + data_filename=data_filename, + target_filename=target_filename, + data_module=DATA_MODULE, + ) + + +def load_sample_images(): + """Load sample images for image manipulation. + + Loads both, ``china`` and ``flower``. + + Read more in the :ref:`User Guide `. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + images : list of ndarray of shape (427, 640, 3) + The two sample image. + filenames : list + The filenames for the images. + DESCR : str + The full description of the dataset. 
+ + Examples + -------- + To load the data and visualize the images: + + >>> from sklearn.datasets import load_sample_images + >>> dataset = load_sample_images() #doctest: +SKIP + >>> len(dataset.images) #doctest: +SKIP + 2 + >>> first_img_data = dataset.images[0] #doctest: +SKIP + >>> first_img_data.shape #doctest: +SKIP + (427, 640, 3) + >>> first_img_data.dtype #doctest: +SKIP + dtype('uint8') + """ + try: + from PIL import Image + except ImportError: + raise ImportError( + "The Python Imaging Library (PIL) is required to load data " + "from jpeg files. Please refer to " + "https://pillow.readthedocs.io/en/stable/installation.html " + "for installing PIL." + ) + + descr = load_descr("README.txt", descr_module=IMAGES_MODULE) + + filenames, images = [], [] + + jpg_paths = sorted( + resource + for resource in resources.files(IMAGES_MODULE).iterdir() + if resource.is_file() and resource.match("*.jpg") + ) + + for path in jpg_paths: + filenames.append(str(path)) + with path.open("rb") as image_file: + pil_image = Image.open(image_file) + image = np.asarray(pil_image) + images.append(image) + + return Bunch(images=images, filenames=filenames, DESCR=descr) + + +@validate_params( + { + "image_name": [StrOptions({"china.jpg", "flower.jpg"})], + }, + prefer_skip_nested_validation=True, +) +def load_sample_image(image_name): + """Load the numpy array of a single sample image. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + image_name : {`china.jpg`, `flower.jpg`} + The name of the sample image loaded. + + Returns + ------- + img : 3D array + The image as a numpy array: height x width x color. + + Examples + -------- + + >>> from sklearn.datasets import load_sample_image + >>> china = load_sample_image('china.jpg') # doctest: +SKIP + >>> china.dtype # doctest: +SKIP + dtype('uint8') + >>> china.shape # doctest: +SKIP + (427, 640, 3) + >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP + >>> flower.dtype # doctest: +SKIP + dtype('uint8') + >>> flower.shape # doctest: +SKIP + (427, 640, 3) + """ + images = load_sample_images() + index = None + for i, filename in enumerate(images.filenames): + if filename.endswith(image_name): + index = i + break + if index is None: + raise AttributeError("Cannot find sample image: %s" % image_name) + return images.images[index] + + +def _pkl_filepath(*args, **kwargs): + """Return filename for Python 3 pickles + + args[-1] is expected to be the ".pkl" filename. For compatibility with + older scikit-learn versions, a suffix is inserted before the extension. + + _pkl_filepath('/path/to/folder', 'filename.pkl') returns + '/path/to/folder/filename_py3.pkl' + + """ + py3_suffix = kwargs.get("py3_suffix", "_py3") + basename, ext = splitext(args[-1]) + basename += py3_suffix + new_args = args[:-1] + (basename + ext,) + return join(*new_args) + + +def _sha256(path): + """Calculate the sha256 hash of the file at path.""" + sha256hash = hashlib.sha256() + chunk_size = 8192 + with open(path, "rb") as f: + while True: + buffer = f.read(chunk_size) + if not buffer: + break + sha256hash.update(buffer) + return sha256hash.hexdigest() + + +def _fetch_remote(remote, dirname=None): + """Helper function to download a remote dataset into path + + Fetch a dataset pointed by remote's url, save into path using remote's + filename and ensure its integrity based on the SHA256 Checksum of the + downloaded file. 
+ + Parameters + ---------- + remote : RemoteFileMetadata + Named tuple containing remote dataset meta information: url, filename + and checksum + + dirname : str + Directory to save the file to. + + Returns + ------- + file_path: str + Full path of the created file. + """ + + file_path = remote.filename if dirname is None else join(dirname, remote.filename) + urlretrieve(remote.url, file_path) + checksum = _sha256(file_path) + if remote.checksum != checksum: + raise OSError( + "{} has an SHA256 checksum ({}) " + "differing from expected ({}), " + "file may be corrupted.".format(file_path, checksum, remote.checksum) + ) + return file_path diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_covtype.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_covtype.py new file mode 100644 index 0000000000000000000000000000000000000000..4e1b1d7961f2e4b0352e8f2ca189e5d0eae08cd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_covtype.py @@ -0,0 +1,236 @@ +"""Forest covertype dataset. + +A classic dataset for classification benchmarks, featuring categorical and +real-valued features. + +The dataset page is available from UCI Machine Learning Repository + + https://archive.ics.uci.edu/ml/datasets/Covertype + +Courtesy of Jock A. Blackard and Colorado State University. +""" + +# Author: Lars Buitinck +# Peter Prettenhofer +# License: BSD 3 clause + +import logging +import os +from gzip import GzipFile +from os.path import exists, join +from tempfile import TemporaryDirectory + +import joblib +import numpy as np + +from ..utils import Bunch, check_random_state +from ..utils._param_validation import validate_params +from . import get_data_home +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + _pkl_filepath, + load_descr, +) + +# The original data can be found in: +# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz +ARCHIVE = RemoteFileMetadata( + filename="covtype.data.gz", + url="https://ndownloader.figshare.com/files/5976039", + checksum="614360d0257557dd1792834a85a1cdebfadc3c4f30b011d56afee7ffb5b15771", +) + +logger = logging.getLogger(__name__) + +# Column names reference: +# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info +FEATURE_NAMES = [ + "Elevation", + "Aspect", + "Slope", + "Horizontal_Distance_To_Hydrology", + "Vertical_Distance_To_Hydrology", + "Horizontal_Distance_To_Roadways", + "Hillshade_9am", + "Hillshade_Noon", + "Hillshade_3pm", + "Horizontal_Distance_To_Fire_Points", +] +FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)] +FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)] +TARGET_NAMES = ["Cover_Type"] + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + "download_if_missing": ["boolean"], + "random_state": ["random_state"], + "shuffle": ["boolean"], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_covtype( + *, + data_home=None, + download_if_missing=True, + random_state=None, + shuffle=False, + return_X_y=False, + as_frame=False, +): + """Load the covertype dataset (classification). + + Download it if necessary. + + ================= ============ + Classes 7 + Samples total 581012 + Dimensionality 54 + Features int + ================= ============ + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. 
By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + shuffle : bool, default=False + Whether to shuffle dataset. + + return_X_y : bool, default=False + If True, returns ``(data.data, data.target)`` instead of a Bunch + object. + + .. versionadded:: 0.20 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric). The target is a pandas DataFrame or + Series depending on the number of target columns. If `return_X_y` is + True, then (`data`, `target`) will be pandas DataFrames or Series as + described below. + + .. versionadded:: 0.24 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray of shape (581012, 54) + Each row corresponds to the 54 features in the dataset. + target : ndarray of shape (581012,) + Each value corresponds to one of + the 7 forest covertypes with values + ranging between 1 to 7. + frame : dataframe of shape (581012, 55) + Only present when `as_frame=True`. Contains `data` and `target`. + DESCR : str + Description of the forest covertype dataset. + feature_names : list + The names of the dataset columns. + target_names: list + The names of the target columns. + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + + Examples + -------- + >>> from sklearn.datasets import fetch_covtype + >>> cov_type = fetch_covtype() + >>> cov_type.data.shape + (581012, 54) + >>> cov_type.target.shape + (581012,) + >>> # Let's check the 4 first feature names + >>> cov_type.feature_names[:4] + ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology'] + """ + data_home = get_data_home(data_home=data_home) + covtype_dir = join(data_home, "covertype") + samples_path = _pkl_filepath(covtype_dir, "samples") + targets_path = _pkl_filepath(covtype_dir, "targets") + available = exists(samples_path) and exists(targets_path) + + if download_if_missing and not available: + os.makedirs(covtype_dir, exist_ok=True) + + # Creating temp_dir as a direct subdirectory of the target directory + # guarantees that both reside on the same filesystem, so that we can use + # os.rename to atomically move the data files to their target location. 
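# --- Editor's note: illustrative sketch only, not part of the upstream scikit-learn file. ---
# The caching pattern used next (write into a TemporaryDirectory created *inside*
# the destination directory, then os.rename into place) keeps the temporary file
# and its final location on the same filesystem, so the rename is atomic and
# readers never observe a half-written cache file. A minimal standalone version
# of the same idea, using only the standard library:

import os
from tempfile import TemporaryDirectory

def atomic_write_bytes(dest_path, payload):
    """Write `payload` to `dest_path` without ever exposing a partial file."""
    dest_dir = os.path.dirname(dest_path) or "."
    os.makedirs(dest_dir, exist_ok=True)
    with TemporaryDirectory(dir=dest_dir) as tmp_dir:
        tmp_path = os.path.join(tmp_dir, os.path.basename(dest_path))
        with open(tmp_path, "wb") as f:
            f.write(payload)
        os.rename(tmp_path, dest_path)  # atomic: source and target share a filesystem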
+ with TemporaryDirectory(dir=covtype_dir) as temp_dir: + logger.info(f"Downloading {ARCHIVE.url}") + archive_path = _fetch_remote(ARCHIVE, dirname=temp_dir) + Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",") + + X = Xy[:, :-1] + y = Xy[:, -1].astype(np.int32, copy=False) + + samples_tmp_path = _pkl_filepath(temp_dir, "samples") + joblib.dump(X, samples_tmp_path, compress=9) + os.rename(samples_tmp_path, samples_path) + + targets_tmp_path = _pkl_filepath(temp_dir, "targets") + joblib.dump(y, targets_tmp_path, compress=9) + os.rename(targets_tmp_path, targets_path) + + elif not available and not download_if_missing: + raise OSError("Data not found and `download_if_missing` is False") + try: + X, y + except NameError: + X = joblib.load(samples_path) + y = joblib.load(targets_path) + + if shuffle: + ind = np.arange(X.shape[0]) + rng = check_random_state(random_state) + rng.shuffle(ind) + X = X[ind] + y = y[ind] + + fdescr = load_descr("covtype.rst") + + frame = None + if as_frame: + frame, X, y = _convert_data_dataframe( + caller_name="fetch_covtype", + data=X, + target=y, + feature_names=FEATURE_NAMES, + target_names=TARGET_NAMES, + ) + if return_X_y: + return X, y + + return Bunch( + data=X, + target=y, + frame=frame, + target_names=TARGET_NAMES, + feature_names=FEATURE_NAMES, + DESCR=fdescr, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py new file mode 100644 index 0000000000000000000000000000000000000000..8e1b3c91e254b80bff7b52d7e671ac15ba079264 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_olivetti_faces.py @@ -0,0 +1,156 @@ +"""Modified Olivetti faces dataset. + +The original database was available from (now defunct) + + https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html + +The version retrieved here comes in MATLAB format from the personal +web page of Sam Roweis: + + https://cs.nyu.edu/~roweis/ +""" + +# Copyright (c) 2011 David Warde-Farley +# License: BSD 3 clause + +from os import PathLike, makedirs, remove +from os.path import exists + +import joblib +import numpy as np +from scipy.io import loadmat + +from ..utils import Bunch, check_random_state +from ..utils._param_validation import validate_params +from . import get_data_home +from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr + +# The original data can be found at: +# https://cs.nyu.edu/~roweis/data/olivettifaces.mat +FACES = RemoteFileMetadata( + filename="olivettifaces.mat", + url="https://ndownloader.figshare.com/files/5976027", + checksum="b612fb967f2dc77c9c62d3e1266e0c73d5fca46a4b8906c18e454d41af987794", +) + + +@validate_params( + { + "data_home": [str, PathLike, None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_olivetti_faces( + *, + data_home=None, + shuffle=False, + random_state=0, + download_if_missing=True, + return_X_y=False, +): + """Load the Olivetti faces data-set from AT&T (classification). + + Download it if necessary. + + ================= ===================== + Classes 40 + Samples total 400 + Dimensionality 4096 + Features real, between 0 and 1 + ================= ===================== + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. 
By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + shuffle : bool, default=False + If True the order of the dataset is shuffled to avoid having + images of the same person grouped. + + random_state : int, RandomState instance or None, default=0 + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns `(data, target)` instead of a `Bunch` object. See + below for more information about the `data` and `target` object. + + .. versionadded:: 0.22 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data: ndarray, shape (400, 4096) + Each row corresponds to a ravelled + face image of original size 64 x 64 pixels. + images : ndarray, shape (400, 64, 64) + Each row is a face image + corresponding to one of the 40 subjects of the dataset. + target : ndarray, shape (400,) + Labels associated to each face image. + Those labels are ranging from 0-39 and correspond to the + Subject IDs. + DESCR : str + Description of the modified Olivetti Faces Dataset. + + (data, target) : tuple if `return_X_y=True` + Tuple with the `data` and `target` objects described above. + + .. versionadded:: 0.22 + """ + data_home = get_data_home(data_home=data_home) + if not exists(data_home): + makedirs(data_home) + filepath = _pkl_filepath(data_home, "olivetti.pkz") + if not exists(filepath): + if not download_if_missing: + raise OSError("Data not found and `download_if_missing` is False") + + print("downloading Olivetti faces from %s to %s" % (FACES.url, data_home)) + mat_path = _fetch_remote(FACES, dirname=data_home) + mfile = loadmat(file_name=mat_path) + # delete raw .mat data + remove(mat_path) + + faces = mfile["faces"].T.copy() + joblib.dump(faces, filepath, compress=6) + del mfile + else: + faces = joblib.load(filepath) + + # We want floating point data, but float32 is enough (there is only + # one byte of precision in the original uint8s anyway) + faces = np.float32(faces) + faces = faces - faces.min() + faces /= faces.max() + faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1) + # 10 images per class, 400 images total, each class is contiguous. 
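# Editor's note: illustrative sketch only, not part of the upstream scikit-learn file.
# The statement just below labels image k with subject k // 10, so images 0-9
# belong to subject 0, images 10-19 to subject 1, and so on (40 subjects with
# 10 contiguous images each). An equivalent vectorized construction, using the
# numpy import already present in this module:

target_demo = np.repeat(np.arange(40), 10)  # shape (400,), values 0..39
# np.array_equal(target_demo, np.array([i // 10 for i in range(400)])) is True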
+ target = np.array([i // 10 for i in range(400)]) + if shuffle: + random_state = check_random_state(random_state) + order = random_state.permutation(len(faces)) + faces = faces[order] + target = target[order] + faces_vectorized = faces.reshape(len(faces), -1) + + fdescr = load_descr("olivetti_faces.rst") + + if return_X_y: + return faces_vectorized, target + + return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_openml.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_openml.py new file mode 100644 index 0000000000000000000000000000000000000000..d1745042bfcba7aef3696290973e07d71785d2fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_openml.py @@ -0,0 +1,1158 @@ +import gzip +import hashlib +import json +import os +import shutil +import time +from contextlib import closing +from functools import wraps +from os.path import join +from tempfile import TemporaryDirectory +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from urllib.error import HTTPError, URLError +from urllib.request import Request, urlopen +from warnings import warn + +import numpy as np + +from ..utils import ( + Bunch, + check_pandas_support, # noqa # noqa +) +from ..utils._param_validation import ( + Integral, + Interval, + Real, + StrOptions, + validate_params, +) +from . import get_data_home +from ._arff_parser import load_arff_from_gzip_file + +__all__ = ["fetch_openml"] + +_OPENML_PREFIX = "https://api.openml.org/" +_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2" +_DATA_INFO = "api/v1/json/data/{}" +_DATA_FEATURES = "api/v1/json/data/features/{}" +_DATA_QUALITIES = "api/v1/json/data/qualities/{}" +_DATA_FILE = "data/v1/download/{}" + +OpenmlQualitiesType = List[Dict[str, str]] +OpenmlFeaturesType = List[Dict[str, str]] + + +def _get_local_path(openml_path: str, data_home: str) -> str: + return os.path.join(data_home, "openml.org", openml_path + ".gz") + + +def _retry_with_clean_cache( + openml_path: str, + data_home: Optional[str], + no_retry_exception: Optional[Exception] = None, +) -> Callable: + """If the first call to the decorated function fails, the local cached + file is removed, and the function is called again. If ``data_home`` is + ``None``, then the function is called once. We can provide a specific + exception to not retry on using `no_retry_exception` parameter. + """ + + def decorator(f): + @wraps(f) + def wrapper(*args, **kw): + if data_home is None: + return f(*args, **kw) + try: + return f(*args, **kw) + except URLError: + raise + except Exception as exc: + if no_retry_exception is not None and isinstance( + exc, no_retry_exception + ): + raise + warn("Invalid cache, redownloading file", RuntimeWarning) + local_path = _get_local_path(openml_path, data_home) + if os.path.exists(local_path): + os.unlink(local_path) + return f(*args, **kw) + + return wrapper + + return decorator + + +def _retry_on_network_error( + n_retries: int = 3, delay: float = 1.0, url: str = "" +) -> Callable: + """If the function call results in a network error, call the function again + up to ``n_retries`` times with a ``delay`` between each call. If the error + has a 412 status code, don't call the function again as this is a specific + OpenML error. + The url parameter is used to give more information to the user about the + error. 
+ """ + + def decorator(f): + @wraps(f) + def wrapper(*args, **kwargs): + retry_counter = n_retries + while True: + try: + return f(*args, **kwargs) + except (URLError, TimeoutError) as e: + # 412 is a specific OpenML error code. + if isinstance(e, HTTPError) and e.code == 412: + raise + if retry_counter == 0: + raise + warn( + f"A network error occurred while downloading {url}. Retrying..." + ) + retry_counter -= 1 + time.sleep(delay) + + return wrapper + + return decorator + + +def _open_openml_url( + openml_path: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0 +): + """ + Returns a resource from OpenML.org. Caches it to data_home if required. + + Parameters + ---------- + openml_path : str + OpenML URL that will be accessed. This will be prefixes with + _OPENML_PREFIX. + + data_home : str + Directory to which the files will be cached. If None, no caching will + be applied. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + Returns + ------- + result : stream + A stream to the OpenML resource. + """ + + def is_gzip_encoded(_fsrc): + return _fsrc.info().get("Content-Encoding", "") == "gzip" + + req = Request(_OPENML_PREFIX + openml_path) + req.add_header("Accept-encoding", "gzip") + + if data_home is None: + fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req) + if is_gzip_encoded(fsrc): + return gzip.GzipFile(fileobj=fsrc, mode="rb") + return fsrc + + local_path = _get_local_path(openml_path, data_home) + dir_name, file_name = os.path.split(local_path) + if not os.path.exists(local_path): + os.makedirs(dir_name, exist_ok=True) + try: + # Create a tmpdir as a subfolder of dir_name where the final file will + # be moved to if the download is successful. This guarantees that the + # renaming operation to the final location is atomic to ensure the + # concurrence safety of the dataset caching mechanism. + with TemporaryDirectory(dir=dir_name) as tmpdir: + with closing( + _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)( + req + ) + ) as fsrc: + opener: Callable + if is_gzip_encoded(fsrc): + opener = open + else: + opener = gzip.GzipFile + with opener(os.path.join(tmpdir, file_name), "wb") as fdst: + shutil.copyfileobj(fsrc, fdst) + shutil.move(fdst.name, local_path) + except Exception: + if os.path.exists(local_path): + os.unlink(local_path) + raise + + # XXX: First time, decompression will not be necessary (by using fsrc), but + # it will happen nonetheless + return gzip.GzipFile(local_path, "rb") + + +class OpenMLError(ValueError): + """HTTP 412 is a specific OpenML error code, indicating a generic error""" + + pass + + +def _get_json_content_from_openml_api( + url: str, + error_message: Optional[str], + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> Dict: + """ + Loads json data from the openml api. + + Parameters + ---------- + url : str + The URL to load from. Should be an official OpenML endpoint. + + error_message : str or None + The error message to raise if an acceptable OpenML error is thrown + (acceptable error is, e.g., data id not found. Other errors, like 404's + will throw the native error message). + + data_home : str or None + Location to cache the response. None if no cache is required. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. 
Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + Returns + ------- + json_data : json + the json result from the OpenML server if the call was successful. + An exception otherwise. + """ + + @_retry_with_clean_cache(url, data_home=data_home) + def _load_json(): + with closing( + _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) + ) as response: + return json.loads(response.read().decode("utf-8")) + + try: + return _load_json() + except HTTPError as error: + # 412 is an OpenML specific error code, indicating a generic error + # (e.g., data not found) + if error.code != 412: + raise error + + # 412 error, not in except for nicer traceback + raise OpenMLError(error_message) + + +def _get_data_info_by_name( + name: str, + version: Union[int, str], + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +): + """ + Utilizes the openml dataset listing api to find a dataset by + name/version + OpenML api function: + https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name + + Parameters + ---------- + name : str + name of the dataset + + version : int or str + If version is an integer, the exact name/version will be obtained from + OpenML. If version is a string (value: "active") it will take the first + version from OpenML that is annotated as active. Any other string + values except "active" are treated as integer. + + data_home : str or None + Location to cache the response. None if no cache is required. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + Returns + ------- + first_dataset : json + json representation of the first dataset object that adhired to the + search criteria + + """ + if version == "active": + # situation in which we return the oldest active version + url = _SEARCH_NAME.format(name) + "/status/active/" + error_msg = "No active dataset {} found.".format(name) + json_data = _get_json_content_from_openml_api( + url, + error_msg, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + res = json_data["data"]["dataset"] + if len(res) > 1: + first_version = version = res[0]["version"] + warning_msg = ( + "Multiple active versions of the dataset matching the name" + f" {name} exist. Versions may be fundamentally different, " + f"returning version {first_version}. " + "Available versions:\n" + ) + for r in res: + warning_msg += f"- version {r['version']}, status: {r['status']}\n" + warning_msg += ( + f" url: https://www.openml.org/search?type=data&id={r['did']}\n" + ) + warn(warning_msg) + return res[0] + + # an integer version has been provided + url = (_SEARCH_NAME + "/data_version/{}").format(name, version) + try: + json_data = _get_json_content_from_openml_api( + url, + error_message=None, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + except OpenMLError: + # we can do this in 1 function call if OpenML does not require the + # specification of the dataset status (i.e., return datasets with a + # given name / version regardless of active, deactivated, etc. ) + # TODO: feature request OpenML. 
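# Editor's note: illustrative sketch only, not part of the upstream scikit-learn file.
# How the listing URL above expands for a concrete name/version pair, using the
# _SEARCH_NAME template defined at the top of this module:

demo_url = (_SEARCH_NAME + "/data_version/{}").format("iris", 1)
# demo_url == "api/v1/json/data/list/data_name/iris/limit/2/data_version/1"
# If that lookup raises OpenMLError (no active dataset with this exact
# name/version), the code below retries the same query with
# "/status/deactivated" appended, so that deactivated datasets are matched too.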
+ url += "/status/deactivated" + error_msg = "Dataset {} with version {} not found.".format(name, version) + json_data = _get_json_content_from_openml_api( + url, + error_msg, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + + return json_data["data"]["dataset"][0] + + +def _get_data_description_by_id( + data_id: int, + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> Dict[str, Any]: + # OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id + url = _DATA_INFO.format(data_id) + error_message = "Dataset with data_id {} not found.".format(data_id) + json_data = _get_json_content_from_openml_api( + url, + error_message, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + return json_data["data_set_description"] + + +def _get_data_features( + data_id: int, + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> OpenmlFeaturesType: + # OpenML function: + # https://www.openml.org/api_docs#!/data/get_data_features_id + url = _DATA_FEATURES.format(data_id) + error_message = "Dataset with data_id {} not found.".format(data_id) + json_data = _get_json_content_from_openml_api( + url, + error_message, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + return json_data["data_features"]["feature"] + + +def _get_data_qualities( + data_id: int, + data_home: Optional[str], + n_retries: int = 3, + delay: float = 1.0, +) -> OpenmlQualitiesType: + # OpenML API function: + # https://www.openml.org/api_docs#!/data/get_data_qualities_id + url = _DATA_QUALITIES.format(data_id) + error_message = "Dataset with data_id {} not found.".format(data_id) + json_data = _get_json_content_from_openml_api( + url, + error_message, + data_home=data_home, + n_retries=n_retries, + delay=delay, + ) + # the qualities might not be available, but we still try to process + # the data + return json_data.get("data_qualities", {}).get("quality", []) + + +def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int: + """Get the number of samples from data qualities. + + Parameters + ---------- + data_qualities : list of dict + Used to retrieve the number of instances (samples) in the dataset. + + Returns + ------- + n_samples : int + The number of samples in the dataset or -1 if data qualities are + unavailable. + """ + # If the data qualities are unavailable, we return -1 + default_n_samples = -1 + + qualities = {d["name"]: d["value"] for d in data_qualities} + return int(float(qualities.get("NumberOfInstances", default_n_samples))) + + +def _load_arff_response( + url: str, + data_home: Optional[str], + parser: str, + output_type: str, + openml_columns_info: dict, + feature_names_to_select: List[str], + target_names_to_select: List[str], + shape: Optional[Tuple[int, int]], + md5_checksum: str, + n_retries: int = 3, + delay: float = 1.0, + read_csv_kwargs: Optional[Dict] = None, +): + """Load the ARFF data associated with the OpenML URL. + + In addition of loading the data, this function will also check the + integrity of the downloaded file from OpenML using MD5 checksum. + + Parameters + ---------- + url : str + The URL of the ARFF file on OpenML. + + data_home : str + The location where to cache the data. + + parser : {"liac-arff", "pandas"} + The parser used to parse the ARFF file. + + output_type : {"numpy", "pandas", "sparse"} + The type of the arrays that will be returned. 
The possibilities are: + + - `"numpy"`: both `X` and `y` will be NumPy arrays; + - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; + - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a + pandas Series or DataFrame. + + openml_columns_info : dict + The information provided by OpenML regarding the columns of the ARFF + file. + + feature_names_to_select : list of str + The list of the features to be selected. + + target_names_to_select : list of str + The list of the target variables to be selected. + + shape : tuple or None + With `parser="liac-arff"`, when using a generator to load the data, + one needs to provide the shape of the data beforehand. + + md5_checksum : str + The MD5 checksum provided by OpenML to check the data integrity. + + n_retries : int, default=3 + The number of times to retry downloading the data if it fails. + + delay : float, default=1.0 + The delay between two consecutive downloads in seconds. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv` when using the pandas parser. + It allows to overwrite the default options. + + .. versionadded:: 1.3 + + Returns + ------- + X : {ndarray, sparse matrix, dataframe} + The data matrix. + + y : {ndarray, dataframe, series} + The target. + + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) + with closing(gzip_file): + md5 = hashlib.md5() + for chunk in iter(lambda: gzip_file.read(4096), b""): + md5.update(chunk) + actual_md5_checksum = md5.hexdigest() + + if actual_md5_checksum != md5_checksum: + raise ValueError( + f"md5 checksum of local file for {url} does not match description: " + f"expected: {md5_checksum} but got {actual_md5_checksum}. " + "Downloaded file could have been modified / corrupted, clean cache " + "and retry..." + ) + + def _open_url_and_load_gzip_file(url, data_home, n_retries, delay, arff_params): + gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) + with closing(gzip_file): + return load_arff_from_gzip_file(gzip_file, **arff_params) + + arff_params: Dict = dict( + parser=parser, + output_type=output_type, + openml_columns_info=openml_columns_info, + feature_names_to_select=feature_names_to_select, + target_names_to_select=target_names_to_select, + shape=shape, + read_csv_kwargs=read_csv_kwargs or {}, + ) + try: + X, y, frame, categories = _open_url_and_load_gzip_file( + url, data_home, n_retries, delay, arff_params + ) + except Exception as exc: + if parser != "pandas": + raise + + from pandas.errors import ParserError + + if not isinstance(exc, ParserError): + raise + + # A parsing error could come from providing the wrong quotechar + # to pandas. By default, we use a double quote. Thus, we retry + # with a single quote before to raise the error. 
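# Editor's note: illustrative sketch only, not part of the upstream scikit-learn file.
# Why retrying with quotechar="'" can help: some ARFF exports quote string values
# with single quotes, which pandas.read_csv does not treat as quoting by default,
# so an embedded comma is read as an extra field separator. A self-contained
# illustration (assumes pandas is installed):

import io
import pandas as pd

demo_csv = "a,b\n'hello, world',1\n"
demo_df = pd.read_csv(io.StringIO(demo_csv), quotechar="'")
# demo_df has the two expected columns: a == "hello, world", b == 1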
+ arff_params["read_csv_kwargs"].update(quotechar="'") + X, y, frame, categories = _open_url_and_load_gzip_file( + url, data_home, n_retries, delay, arff_params + ) + + return X, y, frame, categories + + +def _download_data_to_bunch( + url: str, + sparse: bool, + data_home: Optional[str], + *, + as_frame: bool, + openml_columns_info: List[dict], + data_columns: List[str], + target_columns: List[str], + shape: Optional[Tuple[int, int]], + md5_checksum: str, + n_retries: int = 3, + delay: float = 1.0, + parser: str, + read_csv_kwargs: Optional[Dict] = None, +): + """Download ARFF data, load it to a specific container and create to Bunch. + + This function has a mechanism to retry/cache/clean the data. + + Parameters + ---------- + url : str + The URL of the ARFF file on OpenML. + + sparse : bool + Whether the dataset is expected to use the sparse ARFF format. + + data_home : str + The location where to cache the data. + + as_frame : bool + Whether or not to return the data into a pandas DataFrame. + + openml_columns_info : list of dict + The information regarding the columns provided by OpenML for the + ARFF dataset. The information is stored as a list of dictionaries. + + data_columns : list of str + The list of the features to be selected. + + target_columns : list of str + The list of the target variables to be selected. + + shape : tuple or None + With `parser="liac-arff"`, when using a generator to load the data, + one needs to provide the shape of the data beforehand. + + md5_checksum : str + The MD5 checksum provided by OpenML to check the data integrity. + + n_retries : int, default=3 + Number of retries when HTTP errors are encountered. Error with status + code 412 won't be retried as they represent OpenML generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + parser : {"liac-arff", "pandas"} + The parser used to parse the ARFF file. + + read_csv_kwargs : dict, default=None + Keyword arguments to pass to `pandas.read_csv` when using the pandas parser. + It allows to overwrite the default options. + + .. versionadded:: 1.3 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + X : {ndarray, sparse matrix, dataframe} + The data matrix. + y : {ndarray, dataframe, series} + The target. + frame : dataframe or None + A dataframe containing both `X` and `y`. `None` if + `output_array_type != "pandas"`. + categories : list of str or None + The names of the features that are categorical. `None` if + `output_array_type == "pandas"`. + """ + # Prepare which columns and data types should be returned for the X and y + features_dict = {feature["name"]: feature for feature in openml_columns_info} + + if sparse: + output_type = "sparse" + elif as_frame: + output_type = "pandas" + else: + output_type = "numpy" + + # XXX: target columns should all be categorical or all numeric + _verify_target_data_type(features_dict, target_columns) + for name in target_columns: + column_info = features_dict[name] + n_missing_values = int(column_info["number_of_missing_values"]) + if n_missing_values > 0: + raise ValueError( + f"Target column '{column_info['name']}' has {n_missing_values} missing " + "values. Missing values are not supported for target columns." + ) + + no_retry_exception = None + if parser == "pandas": + # If we get a ParserError with pandas, then we don't want to retry and we raise + # early. 
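# Editor's note: illustrative, not part of the upstream scikit-learn file.
# The call just below is the decorator factory _retry_with_clean_cache applied by
# hand; an equivalent two-step spelling would be:
#
#     retrying_loader = _retry_with_clean_cache(url, data_home, no_retry_exception)(
#         _load_arff_response
#     )
#     X, y, frame, categories = retrying_loader(url, data_home, ...)
#
# i.e. on a first failure the cached file is deleted and the load is retried once,
# unless the exception is an instance of no_retry_exception (here ParserError).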
+ from pandas.errors import ParserError + + no_retry_exception = ParserError + + X, y, frame, categories = _retry_with_clean_cache( + url, data_home, no_retry_exception + )(_load_arff_response)( + url, + data_home, + parser=parser, + output_type=output_type, + openml_columns_info=features_dict, + feature_names_to_select=data_columns, + target_names_to_select=target_columns, + shape=shape, + md5_checksum=md5_checksum, + n_retries=n_retries, + delay=delay, + read_csv_kwargs=read_csv_kwargs, + ) + + return Bunch( + data=X, + target=y, + frame=frame, + categories=categories, + feature_names=data_columns, + target_names=target_columns, + ) + + +def _verify_target_data_type(features_dict, target_columns): + # verifies the data type of the y array in case there are multiple targets + # (throws an error if these targets do not comply with sklearn support) + if not isinstance(target_columns, list): + raise ValueError("target_column should be list, got: %s" % type(target_columns)) + found_types = set() + for target_column in target_columns: + if target_column not in features_dict: + raise KeyError(f"Could not find target_column='{target_column}'") + if features_dict[target_column]["data_type"] == "numeric": + found_types.add(np.float64) + else: + found_types.add(object) + + # note: we compare to a string, not boolean + if features_dict[target_column]["is_ignore"] == "true": + warn(f"target_column='{target_column}' has flag is_ignore.") + if features_dict[target_column]["is_row_identifier"] == "true": + warn(f"target_column='{target_column}' has flag is_row_identifier.") + if len(found_types) > 1: + raise ValueError( + "Can only handle homogeneous multi-target datasets, " + "i.e., all targets are either numeric or " + "categorical." + ) + + +def _valid_data_column_names(features_list, target_columns): + # logic for determining on which columns can be learned. Note that from the + # OpenML guide follows that columns that have the `is_row_identifier` or + # `is_ignore` flag, these can not be learned on. Also target columns are + # excluded. + valid_data_column_names = [] + for feature in features_list: + if ( + feature["name"] not in target_columns + and feature["is_ignore"] != "true" + and feature["is_row_identifier"] != "true" + ): + valid_data_column_names.append(feature["name"]) + return valid_data_column_names + + +@validate_params( + { + "name": [str, None], + "version": [Interval(Integral, 1, None, closed="left"), StrOptions({"active"})], + "data_id": [Interval(Integral, 1, None, closed="left"), None], + "data_home": [str, os.PathLike, None], + "target_column": [str, list, None], + "cache": [bool], + "return_X_y": [bool], + "as_frame": [bool, StrOptions({"auto"})], + "n_retries": [Interval(Integral, 1, None, closed="left")], + "delay": [Interval(Real, 0, None, closed="right")], + "parser": [ + StrOptions({"auto", "pandas", "liac-arff"}), + ], + "read_csv_kwargs": [dict, None], + }, + prefer_skip_nested_validation=True, +) +def fetch_openml( + name: Optional[str] = None, + *, + version: Union[str, int] = "active", + data_id: Optional[int] = None, + data_home: Optional[Union[str, os.PathLike]] = None, + target_column: Optional[Union[str, List]] = "default-target", + cache: bool = True, + return_X_y: bool = False, + as_frame: Union[str, bool] = "auto", + n_retries: int = 3, + delay: float = 1.0, + parser: str = "auto", + read_csv_kwargs: Optional[Dict] = None, +): + """Fetch dataset from openml by name or dataset id. 
+ + Datasets are uniquely identified by either an integer ID or by a + combination of name and version (i.e. there might be multiple + versions of the 'iris' dataset). Please give either name or data_id + (not both). In case a name is given, a version can also be + provided. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + .. note:: EXPERIMENTAL + + The API is experimental (particularly the return value structure), + and might have small backward-incompatible changes without notice + or warning in future releases. + + Parameters + ---------- + name : str, default=None + String identifier of the dataset. Note that OpenML can have multiple + datasets with the same name. + + version : int or 'active', default='active' + Version of the dataset. Can only be provided if also ``name`` is given. + If 'active' the oldest version that's still active is used. Since + there may be more than one active version of a dataset, and those + versions may fundamentally be different from one another, setting an + exact version is highly recommended. + + data_id : int, default=None + OpenML ID of the dataset. The most specific way of retrieving a + dataset. If data_id is not given, name (and potential version) are + used to obtain a dataset. + + data_home : str or path-like, default=None + Specify another download and cache folder for the data sets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + target_column : str, list or None, default='default-target' + Specify the column name in the data to use as target. If + 'default-target', the standard target column a stored on the server + is used. If ``None``, all columns are returned as data and the + target is ``None``. If list (of strings), all columns with these names + are returned as multi-target (Note: not all scikit-learn classifiers + can handle all types of multi-output combinations). + + cache : bool, default=True + Whether to cache the downloaded datasets into `data_home`. + + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. See + below for more information about the `data` and `target` objects. + + as_frame : bool or 'auto', default='auto' + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string or categorical). The target is + a pandas DataFrame or Series depending on the number of target_columns. + The Bunch will contain a ``frame`` attribute with the target and the + data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas + DataFrames or Series as describe above. + + If `as_frame` is 'auto', the data and target will be converted to + DataFrame or Series as if `as_frame` is set to True, unless the dataset + is stored in sparse format. + + If `as_frame` is False, the data and target will be NumPy arrays and + the `data` will only contain numerical values when `parser="liac-arff"` + where the categories are provided in the attribute `categories` of the + `Bunch` instance. When `parser="pandas"`, no ordinal encoding is made. + + .. versionchanged:: 0.24 + The default value of `as_frame` changed from `False` to `'auto'` + in 0.24. + + n_retries : int, default=3 + Number of retries when HTTP errors or network timeouts are encountered. + Error with status code 412 won't be retried as they represent OpenML + generic errors. + + delay : float, default=1.0 + Number of seconds between retries. + + parser : {"auto", "pandas", "liac-arff"}, default="auto" + Parser used to load the ARFF file. 
Two parsers are implemented: + + - `"pandas"`: this is the most efficient parser. However, it requires + pandas to be installed and can only open dense datasets. + - `"liac-arff"`: this is a pure Python ARFF parser that is much less + memory- and CPU-efficient. It deals with sparse ARFF datasets. + + If `"auto"`, the parser is chosen automatically such that `"liac-arff"` + is selected for sparse ARFF datasets, otherwise `"pandas"` is selected. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `parser` changes from `"liac-arff"` to + `"auto"`. + + read_csv_kwargs : dict, default=None + Keyword arguments passed to :func:`pandas.read_csv` when loading the data + from a ARFF file and using the pandas parser. It can allow to + overwrite some default parameters. + + .. versionadded:: 1.3 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame + The feature matrix. Categorical features are encoded as ordinals. + target : np.array, pandas Series or DataFrame + The regression target or classification labels, if applicable. + Dtype is float if numeric, and object if categorical. If + ``as_frame`` is True, ``target`` is a pandas object. + DESCR : str + The full description of the dataset. + feature_names : list + The names of the dataset columns. + target_names: list + The names of the target columns. + + .. versionadded:: 0.22 + + categories : dict or None + Maps each categorical feature name to a list of values, such + that the value encoded as i is ith in the list. If ``as_frame`` + is True, this is None. + details : dict + More metadata from OpenML. + frame : pandas DataFrame + Only present when `as_frame=True`. DataFrame with ``data`` and + ``target``. + + (data, target) : tuple if ``return_X_y`` is True + + .. note:: EXPERIMENTAL + + This interface is **experimental** and subsequent releases may + change attributes without notice (although there should only be + minor changes to ``data`` and ``target``). + + Missing values in the 'data' are represented as NaN's. Missing values + in 'target' are represented as NaN's (numerical target) or None + (categorical target). + + Notes + ----- + The `"pandas"` and `"liac-arff"` parsers can lead to different data types + in the output. The notable differences are the following: + + - The `"liac-arff"` parser always encodes categorical features as `str` objects. + To the contrary, the `"pandas"` parser instead infers the type while + reading and numerical categories will be casted into integers whenever + possible. + - The `"liac-arff"` parser uses float64 to encode numerical features + tagged as 'REAL' and 'NUMERICAL' in the metadata. The `"pandas"` + parser instead infers if these numerical features corresponds + to integers and uses panda's Integer extension dtype. + - In particular, classification datasets with integer categories are + typically loaded as such `(0, 1, ...)` with the `"pandas"` parser while + `"liac-arff"` will force the use of string encoded class labels such as + `"0"`, `"1"` and so on. + - The `"pandas"` parser will not strip single quotes - i.e. `'` - from + string columns. For instance, a string `'my string'` will be kept as is + while the `"liac-arff"` parser will strip the single quotes. For + categorical columns, the single quotes are stripped from the values. 
+ + In addition, when `as_frame=False` is used, the `"liac-arff"` parser + returns ordinally encoded data where the categories are provided in the + attribute `categories` of the `Bunch` instance. Instead, `"pandas"` returns + a NumPy array were the categories are not encoded. + + Examples + -------- + >>> from sklearn.datasets import fetch_openml + >>> adult = fetch_openml("adult", version=2) # doctest: +SKIP + >>> adult.frame.info() # doctest: +SKIP + + RangeIndex: 48842 entries, 0 to 48841 + Data columns (total 15 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 age 48842 non-null int64 + 1 workclass 46043 non-null category + 2 fnlwgt 48842 non-null int64 + 3 education 48842 non-null category + 4 education-num 48842 non-null int64 + 5 marital-status 48842 non-null category + 6 occupation 46033 non-null category + 7 relationship 48842 non-null category + 8 race 48842 non-null category + 9 sex 48842 non-null category + 10 capital-gain 48842 non-null int64 + 11 capital-loss 48842 non-null int64 + 12 hours-per-week 48842 non-null int64 + 13 native-country 47985 non-null category + 14 class 48842 non-null category + dtypes: category(9), int64(6) + memory usage: 2.7 MB + """ + if cache is False: + # no caching will be applied + data_home = None + else: + data_home = get_data_home(data_home=data_home) + data_home = join(str(data_home), "openml") + + # check valid function arguments. data_id XOR (name, version) should be + # provided + if name is not None: + # OpenML is case-insensitive, but the caching mechanism is not + # convert all data names (str) to lower case + name = name.lower() + if data_id is not None: + raise ValueError( + "Dataset data_id={} and name={} passed, but you can only " + "specify a numeric data_id or a name, not " + "both.".format(data_id, name) + ) + data_info = _get_data_info_by_name( + name, version, data_home, n_retries=n_retries, delay=delay + ) + data_id = data_info["did"] + elif data_id is not None: + # from the previous if statement, it is given that name is None + if version != "active": + raise ValueError( + "Dataset data_id={} and version={} passed, but you can only " + "specify a numeric data_id or a version, not " + "both.".format(data_id, version) + ) + else: + raise ValueError( + "Neither name nor data_id are provided. Please provide name or data_id." + ) + + data_description = _get_data_description_by_id(data_id, data_home) + if data_description["status"] != "active": + warn( + "Version {} of dataset {} is inactive, meaning that issues have " + "been found in the dataset. Try using a newer version from " + "this URL: {}".format( + data_description["version"], + data_description["name"], + data_description["url"], + ) + ) + if "error" in data_description: + warn( + "OpenML registered a problem with the dataset. It might be " + "unusable. Error: {}".format(data_description["error"]) + ) + if "warning" in data_description: + warn( + "OpenML raised a warning on the dataset. It might be " + "unusable. Warning: {}".format(data_description["warning"]) + ) + + return_sparse = data_description["format"].lower() == "sparse_arff" + as_frame = not return_sparse if as_frame == "auto" else as_frame + if parser == "auto": + parser_ = "liac-arff" if return_sparse else "pandas" + else: + parser_ = parser + + if parser_ == "pandas": + try: + check_pandas_support("`fetch_openml`") + except ImportError as exc: + if as_frame: + err_msg = ( + "Returning pandas objects requires pandas to be installed. 
" + "Alternatively, explicitly set `as_frame=False` and " + "`parser='liac-arff'`." + ) + else: + err_msg = ( + f"Using `parser={parser!r}` wit dense data requires pandas to be " + "installed. Alternatively, explicitly set `parser='liac-arff'`." + ) + raise ImportError(err_msg) from exc + + if return_sparse: + if as_frame: + raise ValueError( + "Sparse ARFF datasets cannot be loaded with as_frame=True. " + "Use as_frame=False or as_frame='auto' instead." + ) + if parser_ == "pandas": + raise ValueError( + f"Sparse ARFF datasets cannot be loaded with parser={parser!r}. " + "Use parser='liac-arff' or parser='auto' instead." + ) + + # download data features, meta-info about column types + features_list = _get_data_features(data_id, data_home) + + if not as_frame: + for feature in features_list: + if "true" in (feature["is_ignore"], feature["is_row_identifier"]): + continue + if feature["data_type"] == "string": + raise ValueError( + "STRING attributes are not supported for " + "array representation. Try as_frame=True" + ) + + if target_column == "default-target": + # determines the default target based on the data feature results + # (which is currently more reliable than the data description; + # see issue: https://github.com/openml/OpenML/issues/768) + target_columns = [ + feature["name"] + for feature in features_list + if feature["is_target"] == "true" + ] + elif isinstance(target_column, str): + # for code-simplicity, make target_column by default a list + target_columns = [target_column] + elif target_column is None: + target_columns = [] + else: + # target_column already is of type list + target_columns = target_column + data_columns = _valid_data_column_names(features_list, target_columns) + + shape: Optional[Tuple[int, int]] + # determine arff encoding to return + if not return_sparse: + # The shape must include the ignored features to keep the right indexes + # during the arff data conversion. + data_qualities = _get_data_qualities(data_id, data_home) + shape = _get_num_samples(data_qualities), len(features_list) + else: + shape = None + + # obtain the data + url = _DATA_FILE.format(data_description["file_id"]) + bunch = _download_data_to_bunch( + url, + return_sparse, + data_home, + as_frame=bool(as_frame), + openml_columns_info=features_list, + shape=shape, + target_columns=target_columns, + data_columns=data_columns, + md5_checksum=data_description["md5_checksum"], + n_retries=n_retries, + delay=delay, + parser=parser_, + read_csv_kwargs=read_csv_kwargs, + ) + + if return_X_y: + return bunch.data, bunch.target + + description = "{}\n\nDownloaded from openml.org.".format( + data_description.pop("description") + ) + + bunch.update( + DESCR=description, + details=data_description, + url="https://www.openml.org/d/{}".format(data_id), + ) + + return bunch diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py new file mode 100644 index 0000000000000000000000000000000000000000..22ac716871cc284adc3616a8b25e484ab03f0d7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_twenty_newsgroups.py @@ -0,0 +1,561 @@ +"""Caching loader for the 20 newsgroups text classification dataset. 
+ + +The description of the dataset is available on the official website at: + + http://people.csail.mit.edu/jrennie/20Newsgroups/ + +Quoting the introduction: + + The 20 Newsgroups data set is a collection of approximately 20,000 + newsgroup documents, partitioned (nearly) evenly across 20 different + newsgroups. To the best of my knowledge, it was originally collected + by Ken Lang, probably for his Newsweeder: Learning to filter netnews + paper, though he does not explicitly mention this collection. The 20 + newsgroups collection has become a popular data set for experiments + in text applications of machine learning techniques, such as text + classification and text clustering. + +This dataset loader will download the recommended "by date" variant of the +dataset and which features a point in time split between the train and +test sets. The compressed dataset size is around 14 Mb compressed. Once +uncompressed the train set is 52 MB and the test set is 34 MB. +""" +# Copyright (c) 2011 Olivier Grisel +# License: BSD 3 clause + +import codecs +import logging +import os +import pickle +import re +import shutil +import tarfile +from contextlib import suppress + +import joblib +import numpy as np +import scipy.sparse as sp + +from .. import preprocessing +from ..feature_extraction.text import CountVectorizer +from ..utils import Bunch, check_random_state +from ..utils._param_validation import StrOptions, validate_params +from . import get_data_home, load_files +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + _pkl_filepath, + load_descr, +) + +logger = logging.getLogger(__name__) + +# The original data can be found at: +# https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz +ARCHIVE = RemoteFileMetadata( + filename="20news-bydate.tar.gz", + url="https://ndownloader.figshare.com/files/5975967", + checksum="8f1b2514ca22a5ade8fbb9cfa5727df95fa587f4c87b786e15c759fa66d95610", +) + +CACHE_NAME = "20news-bydate.pkz" +TRAIN_FOLDER = "20news-bydate-train" +TEST_FOLDER = "20news-bydate-test" + + +def _download_20newsgroups(target_dir, cache_path): + """Download the 20 newsgroups data and stored it as a zipped pickle.""" + train_path = os.path.join(target_dir, TRAIN_FOLDER) + test_path = os.path.join(target_dir, TEST_FOLDER) + + os.makedirs(target_dir, exist_ok=True) + + logger.info("Downloading dataset from %s (14 MB)", ARCHIVE.url) + archive_path = _fetch_remote(ARCHIVE, dirname=target_dir) + + logger.debug("Decompressing %s", archive_path) + tarfile.open(archive_path, "r:gz").extractall(path=target_dir) + + with suppress(FileNotFoundError): + os.remove(archive_path) + + # Store a zipped pickle + cache = dict( + train=load_files(train_path, encoding="latin1"), + test=load_files(test_path, encoding="latin1"), + ) + compressed_content = codecs.encode(pickle.dumps(cache), "zlib_codec") + with open(cache_path, "wb") as f: + f.write(compressed_content) + + shutil.rmtree(target_dir) + return cache + + +def strip_newsgroup_header(text): + """ + Given text in "news" format, strip the headers, by removing everything + before the first blank line. + + Parameters + ---------- + text : str + The text from which to remove the signature block. 
+ """ + _before, _blankline, after = text.partition("\n\n") + return after + + +_QUOTE_RE = re.compile( + r"(writes in|writes:|wrote:|says:|said:" r"|^In article|^Quoted from|^\||^>)" +) + + +def strip_newsgroup_quoting(text): + """ + Given text in "news" format, strip lines beginning with the quote + characters > or |, plus lines that often introduce a quoted section + (for example, because they contain the string 'writes:'.) + + Parameters + ---------- + text : str + The text from which to remove the signature block. + """ + good_lines = [line for line in text.split("\n") if not _QUOTE_RE.search(line)] + return "\n".join(good_lines) + + +def strip_newsgroup_footer(text): + """ + Given text in "news" format, attempt to remove a signature block. + + As a rough heuristic, we assume that signatures are set apart by either + a blank line or a line made of hyphens, and that it is the last such line + in the file (disregarding blank lines at the end). + + Parameters + ---------- + text : str + The text from which to remove the signature block. + """ + lines = text.strip().split("\n") + for line_num in range(len(lines) - 1, -1, -1): + line = lines[line_num] + if line.strip().strip("-") == "": + break + + if line_num > 0: + return "\n".join(lines[:line_num]) + else: + return text + + +@validate_params( + { + "data_home": [str, os.PathLike, None], + "subset": [StrOptions({"train", "test", "all"})], + "categories": ["array-like", None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "remove": [tuple], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_20newsgroups( + *, + data_home=None, + subset="train", + categories=None, + shuffle=True, + random_state=42, + remove=(), + download_if_missing=True, + return_X_y=False, +): + """Load the filenames and data from the 20 newsgroups dataset \ +(classification). + + Download it if necessary. + + ================= ========== + Classes 20 + Samples total 18846 + Dimensionality 1 + Features text + ================= ========== + + Read more in the :ref:`User Guide <20newsgroups_dataset>`. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify a download and cache folder for the datasets. If None, + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + subset : {'train', 'test', 'all'}, default='train' + Select the dataset to load: 'train' for the training set, 'test' + for the test set, 'all' for both, with shuffled ordering. + + categories : array-like, dtype=str, default=None + If None (default), load all the categories. + If not None, list of category names to load (other categories + ignored). + + shuffle : bool, default=True + Whether or not to shuffle the data: might be important for models that + make the assumption that the samples are independent and identically + distributed (i.i.d.), such as stochastic gradient descent. + + random_state : int, RandomState instance or None, default=42 + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + remove : tuple, default=() + May contain any subset of ('headers', 'footers', 'quotes'). Each of + these are kinds of text that will be detected and removed from the + newsgroup posts, preventing classifiers from overfitting on + metadata. 
+ + 'headers' removes newsgroup headers, 'footers' removes blocks at the + ends of posts that look like signatures, and 'quotes' removes lines + that appear to be quoting another post. + + 'headers' follows an exact standard; the other filters are not always + correct. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns `(data.data, data.target)` instead of a Bunch + object. + + .. versionadded:: 0.22 + + Returns + ------- + bunch : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : list of shape (n_samples,) + The data list to learn. + target: ndarray of shape (n_samples,) + The target labels. + filenames: list of shape (n_samples,) + The path to the location of the data. + DESCR: str + The full description of the dataset. + target_names: list of shape (n_classes,) + The names of target classes. + + (data, target) : tuple if `return_X_y=True` + A tuple of two ndarrays. The first contains a 2D array of shape + (n_samples, n_classes) with each row representing one sample and each + column representing the features. The second array of shape + (n_samples,) contains the target samples. + + .. versionadded:: 0.22 + """ + + data_home = get_data_home(data_home=data_home) + cache_path = _pkl_filepath(data_home, CACHE_NAME) + twenty_home = os.path.join(data_home, "20news_home") + cache = None + if os.path.exists(cache_path): + try: + with open(cache_path, "rb") as f: + compressed_content = f.read() + uncompressed_content = codecs.decode(compressed_content, "zlib_codec") + cache = pickle.loads(uncompressed_content) + except Exception as e: + print(80 * "_") + print("Cache loading failed") + print(80 * "_") + print(e) + + if cache is None: + if download_if_missing: + logger.info("Downloading 20news dataset. 
This may take a few minutes.") + cache = _download_20newsgroups( + target_dir=twenty_home, cache_path=cache_path + ) + else: + raise OSError("20Newsgroups dataset not found") + + if subset in ("train", "test"): + data = cache[subset] + elif subset == "all": + data_lst = list() + target = list() + filenames = list() + for subset in ("train", "test"): + data = cache[subset] + data_lst.extend(data.data) + target.extend(data.target) + filenames.extend(data.filenames) + + data.data = data_lst + data.target = np.array(target) + data.filenames = np.array(filenames) + + fdescr = load_descr("twenty_newsgroups.rst") + + data.DESCR = fdescr + + if "headers" in remove: + data.data = [strip_newsgroup_header(text) for text in data.data] + if "footers" in remove: + data.data = [strip_newsgroup_footer(text) for text in data.data] + if "quotes" in remove: + data.data = [strip_newsgroup_quoting(text) for text in data.data] + + if categories is not None: + labels = [(data.target_names.index(cat), cat) for cat in categories] + # Sort the categories to have the ordering of the labels + labels.sort() + labels, categories = zip(*labels) + mask = np.isin(data.target, labels) + data.filenames = data.filenames[mask] + data.target = data.target[mask] + # searchsorted to have continuous labels + data.target = np.searchsorted(labels, data.target) + data.target_names = list(categories) + # Use an object array to shuffle: avoids memory copy + data_lst = np.array(data.data, dtype=object) + data_lst = data_lst[mask] + data.data = data_lst.tolist() + + if shuffle: + random_state = check_random_state(random_state) + indices = np.arange(data.target.shape[0]) + random_state.shuffle(indices) + data.filenames = data.filenames[indices] + data.target = data.target[indices] + # Use an object array to shuffle: avoids memory copy + data_lst = np.array(data.data, dtype=object) + data_lst = data_lst[indices] + data.data = data_lst.tolist() + + if return_X_y: + return data.data, data.target + + return data + + +@validate_params( + { + "subset": [StrOptions({"train", "test", "all"})], + "remove": [tuple], + "data_home": [str, os.PathLike, None], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + "normalize": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_20newsgroups_vectorized( + *, + subset="train", + remove=(), + data_home=None, + download_if_missing=True, + return_X_y=False, + normalize=True, + as_frame=False, +): + """Load and vectorize the 20 newsgroups dataset (classification). + + Download it if necessary. + + This is a convenience function; the transformation is done using the + default settings for + :class:`~sklearn.feature_extraction.text.CountVectorizer`. For more + advanced usage (stopword filtering, n-gram extraction, etc.), combine + fetch_20newsgroups with a custom + :class:`~sklearn.feature_extraction.text.CountVectorizer`, + :class:`~sklearn.feature_extraction.text.HashingVectorizer`, + :class:`~sklearn.feature_extraction.text.TfidfTransformer` or + :class:`~sklearn.feature_extraction.text.TfidfVectorizer`. + + The resulting counts are normalized using + :func:`sklearn.preprocessing.normalize` unless normalize is set to False. + + ================= ========== + Classes 20 + Samples total 18846 + Dimensionality 130107 + Features real + ================= ========== + + Read more in the :ref:`User Guide <20newsgroups_dataset>`. 
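+
+    A minimal sketch of typical usage (shapes follow the table above; the data
+    are returned as a sparse CSR matrix unless ``as_frame=True``):
+
+    >>> from sklearn.datasets import fetch_20newsgroups_vectorized
+    >>> bunch = fetch_20newsgroups_vectorized(subset="train")  # doctest: +SKIP
+    >>> bunch.data.shape  # doctest: +SKIP
+    (11314, 130107)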
+ + Parameters + ---------- + subset : {'train', 'test', 'all'}, default='train' + Select the dataset to load: 'train' for the training set, 'test' + for the test set, 'all' for both, with shuffled ordering. + + remove : tuple, default=() + May contain any subset of ('headers', 'footers', 'quotes'). Each of + these are kinds of text that will be detected and removed from the + newsgroup posts, preventing classifiers from overfitting on + metadata. + + 'headers' removes newsgroup headers, 'footers' removes blocks at the + ends of posts that look like signatures, and 'quotes' removes lines + that appear to be quoting another post. + + data_home : str or path-like, default=None + Specify an download and cache folder for the datasets. If None, + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(data.data, data.target)`` instead of a Bunch + object. + + .. versionadded:: 0.20 + + normalize : bool, default=True + If True, normalizes each document's feature vector to unit norm using + :func:`sklearn.preprocessing.normalize`. + + .. versionadded:: 0.22 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string, or categorical). The target is + a pandas DataFrame or Series depending on the number of + `target_columns`. + + .. versionadded:: 0.24 + + Returns + ------- + bunch : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data: {sparse matrix, dataframe} of shape (n_samples, n_features) + The input data matrix. If ``as_frame`` is `True`, ``data`` is + a pandas DataFrame with sparse columns. + target: {ndarray, series} of shape (n_samples,) + The target labels. If ``as_frame`` is `True`, ``target`` is a + pandas Series. + target_names: list of shape (n_classes,) + The names of target classes. + DESCR: str + The full description of the dataset. + frame: dataframe of shape (n_samples, n_features + 1) + Only present when `as_frame=True`. Pandas DataFrame with ``data`` + and ``target``. + + .. versionadded:: 0.24 + + (data, target) : tuple if ``return_X_y`` is True + `data` and `target` would be of the format defined in the `Bunch` + description above. + + .. versionadded:: 0.20 + """ + data_home = get_data_home(data_home=data_home) + filebase = "20newsgroup_vectorized" + if remove: + filebase += "remove-" + "-".join(remove) + target_file = _pkl_filepath(data_home, filebase + ".pkl") + + # we shuffle but use a fixed seed for the memoization + data_train = fetch_20newsgroups( + data_home=data_home, + subset="train", + categories=None, + shuffle=True, + random_state=12, + remove=remove, + download_if_missing=download_if_missing, + ) + + data_test = fetch_20newsgroups( + data_home=data_home, + subset="test", + categories=None, + shuffle=True, + random_state=12, + remove=remove, + download_if_missing=download_if_missing, + ) + + if os.path.exists(target_file): + try: + X_train, X_test, feature_names = joblib.load(target_file) + except ValueError as e: + raise ValueError( + f"The cached dataset located in {target_file} was fetched " + "with an older scikit-learn version and it is not compatible " + "with the scikit-learn version imported. You need to " + f"manually delete the file: {target_file}." 
+ ) from e + else: + vectorizer = CountVectorizer(dtype=np.int16) + X_train = vectorizer.fit_transform(data_train.data).tocsr() + X_test = vectorizer.transform(data_test.data).tocsr() + feature_names = vectorizer.get_feature_names_out() + + joblib.dump((X_train, X_test, feature_names), target_file, compress=9) + + # the data is stored as int16 for compactness + # but normalize needs floats + if normalize: + X_train = X_train.astype(np.float64) + X_test = X_test.astype(np.float64) + preprocessing.normalize(X_train, copy=False) + preprocessing.normalize(X_test, copy=False) + + target_names = data_train.target_names + + if subset == "train": + data = X_train + target = data_train.target + elif subset == "test": + data = X_test + target = data_test.target + elif subset == "all": + data = sp.vstack((X_train, X_test)).tocsr() + target = np.concatenate((data_train.target, data_test.target)) + + fdescr = load_descr("twenty_newsgroups.rst") + + frame = None + target_name = ["category_class"] + + if as_frame: + frame, data, target = _convert_data_dataframe( + "fetch_20newsgroups_vectorized", + data, + target, + feature_names, + target_names=target_name, + sparse_data=True, + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + feature_names=feature_names, + DESCR=fdescr, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/images/README.txt b/venv/lib/python3.10/site-packages/sklearn/datasets/images/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..a95a5d42500d45079dedc65c12fd9aff32337ec4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/images/README.txt @@ -0,0 +1,21 @@ +Image: china.jpg +Released under a creative commons license. [1] +Attribution: Some rights reserved by danielbuechele [2] +Retrieved 21st August, 2011 from [3] by Robert Layton + +[1] https://creativecommons.org/licenses/by/2.0/ +[2] https://www.flickr.com/photos/danielbuechele/ +[3] https://www.flickr.com/photos/danielbuechele/6061409035/sizes/z/in/photostream/ + + +Image: flower.jpg +Released under a creative commons license. 
[1] +Attribution: Some rights reserved by danielbuechele [2] +Retrieved 21st August, 2011 from [3] by Robert Layton + +[1] https://creativecommons.org/licenses/by/2.0/ +[2] https://www.flickr.com/photos/vultilion/ +[3] https://www.flickr.com/photos/vultilion/6056698931/sizes/z/in/photostream/ + + + diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/images/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/images/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/images/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/images/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96a67c4768798399d682864c13338b75cc40f0db Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/images/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68b6e3246460217f130f8c6a437005926560d9ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..565016f1776a75fd3a7aecc48fbe07db0136a756 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_arff_parser.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_arff_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f8fd0270039b5a8e60ff352f1bdd16f27e221ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_arff_parser.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b77dee9116e81e37f87127d693c4b1f683d96caf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..802286655da5300ba966b7681ff0e8b68e1b9a2e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cc1907bc7cbcd6f8d1d7aff36f12c6359b3e31a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2813f0070f0d1323cde8941a20b5bcf1b3295571 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2542e57d49d516ea7a32cf67852fec8d7665da8d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_lfw.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_lfw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41f0d50666494d9cb3a6481ec88da69dd626339b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_lfw.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_olivetti_faces.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_olivetti_faces.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ccda6cee9231a4722fc8d332d4138c4267887c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_olivetti_faces.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_openml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_openml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f28e46c912e34d891ac90c04d4a28f418096709e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_openml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6f44fbb352e527f9319ea365c90d4b845fae3e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..c3cc55387ed7ecefb3bec12438a479784fe25b8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_svmlight_format.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_svmlight_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fee7206cdf7ebbf6f3cf6b483b07f97cbb2ad019 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_svmlight_format.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71de5def26124afeec7877eb778918c23cad793b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3223f3588a063e350f1334c147c79299f834b8c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2800da950c16223d527ca36a2347e1b48ae19264 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__pycache__/__init__.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..8c1a2af87b6d91aab9bc735af5e795df764786c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33531c7072f6006bf0335bc46d96b8e336de0eb3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1feba2472bf5a47e2474fdbda094b385fd2b1dac Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e63471fad32661fcaab8e6d77ed057857d91f551 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3caffecb587e3c717e23043b69be1e299a4e61df Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__init__.py 
b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4656519fcda9da3b19f658c9d7623586a56ba0b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..905cc537054f47c85ade13ebbbd448cc20f1bd3d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f950f6880ca22b6a3bc4997ef3beed6ab21600f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac616d65d76aa32d3a686f9a634c5ca02860fa6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90a471e89e99de07298e7ceb281795c0a2aa96a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45978fffa22352ccddbf6de2487304ae2476c610 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4cb511afb87151803826ac332c81df9d422f0fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bea21364296bc359a29f422b6713f32633a8f9e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..366d5a8c6eeb48642144dfcbfdd90dbc2b5d5a20 
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_classification.txt b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_classification.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3c4a3364cac126a91738c780ff668156f151611 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_classification.txt @@ -0,0 +1,9 @@ +# comment +# note: the next line contains a tab +1.0 3:2.5 11:-5.2 16:1.5 # and an inline comment +2.0 6:1.0 13:-3 +# another comment +3.0 21:27 +4.0 2:1.234567890123456e10 # double precision value +1.0 # empty line, all zeros +2.0 3:0 # explicit zeros diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid.txt b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid.txt new file mode 100644 index 0000000000000000000000000000000000000000..05601f6ca6eef3276c6c16c0983262836023eb78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid.txt @@ -0,0 +1,3 @@ +python 2:2.5 10:-5.2 15:1.5 +2.0 5:1.0 12:-3 +3.0 20:27 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid_order.txt b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid_order.txt new file mode 100644 index 0000000000000000000000000000000000000000..2160abf15ea4298d728e4fb2bed37655c8bbb7ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_invalid_order.txt @@ -0,0 +1 @@ +-1 5:2.5 2:-5.2 15:1.5 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt new file mode 100644 index 0000000000000000000000000000000000000000..a8194e5fef163ba9fa255e8f5c3ed9e593793769 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt @@ -0,0 +1,5 @@ +# multilabel dataset in SVMlight format +1,0 2:2.5 10:-5.2 15:1.5 +2 5:1.0 12:-3 + 2:3.5 11:26 +1,2 20:27 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py new file mode 100644 index 0000000000000000000000000000000000000000..4072d9c8ec67f2ba147e56bafc9e91c2d3485639 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py @@ -0,0 +1,142 @@ +"""Test the 20news downloader, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs).""" +from functools import partial +from unittest.mock import patch + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn.datasets.tests.test_common import ( + check_as_frame, + check_pandas_dependency_message, + check_return_X_y, +) +from sklearn.preprocessing import normalize +from sklearn.utils._testing import assert_allclose_dense_sparse + + +def test_20news(fetch_20newsgroups_fxt): + data = fetch_20newsgroups_fxt(subset="all", shuffle=False) + assert data.DESCR.startswith(".. 
_20newsgroups_dataset:") + + # Extract a reduced dataset + data2cats = fetch_20newsgroups_fxt( + subset="all", categories=data.target_names[-1:-3:-1], shuffle=False + ) + # Check that the ordering of the target_names is the same + # as the ordering in the full dataset + assert data2cats.target_names == data.target_names[-2:] + # Assert that we have only 0 and 1 as labels + assert np.unique(data2cats.target).tolist() == [0, 1] + + # Check that the number of filenames is consistent with data/target + assert len(data2cats.filenames) == len(data2cats.target) + assert len(data2cats.filenames) == len(data2cats.data) + + # Check that the first entry of the reduced dataset corresponds to + # the first entry of the corresponding category in the full dataset + entry1 = data2cats.data[0] + category = data2cats.target_names[data2cats.target[0]] + label = data.target_names.index(category) + entry2 = data.data[np.where(data.target == label)[0][0]] + assert entry1 == entry2 + + # check that return_X_y option + X, y = fetch_20newsgroups_fxt(subset="all", shuffle=False, return_X_y=True) + assert len(X) == len(data.data) + assert y.shape == data.target.shape + + +def test_20news_length_consistency(fetch_20newsgroups_fxt): + """Checks the length consistencies within the bunch + + This is a non-regression test for a bug present in 0.16.1. + """ + # Extract the full dataset + data = fetch_20newsgroups_fxt(subset="all") + assert len(data["data"]) == len(data.data) + assert len(data["target"]) == len(data.target) + assert len(data["filenames"]) == len(data.filenames) + + +def test_20news_vectorized(fetch_20newsgroups_vectorized_fxt): + # test subset = train + bunch = fetch_20newsgroups_vectorized_fxt(subset="train") + assert sp.issparse(bunch.data) and bunch.data.format == "csr" + assert bunch.data.shape == (11314, 130107) + assert bunch.target.shape[0] == 11314 + assert bunch.data.dtype == np.float64 + assert bunch.DESCR.startswith(".. _20newsgroups_dataset:") + + # test subset = test + bunch = fetch_20newsgroups_vectorized_fxt(subset="test") + assert sp.issparse(bunch.data) and bunch.data.format == "csr" + assert bunch.data.shape == (7532, 130107) + assert bunch.target.shape[0] == 7532 + assert bunch.data.dtype == np.float64 + assert bunch.DESCR.startswith(".. _20newsgroups_dataset:") + + # test return_X_y option + fetch_func = partial(fetch_20newsgroups_vectorized_fxt, subset="test") + check_return_X_y(bunch, fetch_func) + + # test subset = all + bunch = fetch_20newsgroups_vectorized_fxt(subset="all") + assert sp.issparse(bunch.data) and bunch.data.format == "csr" + assert bunch.data.shape == (11314 + 7532, 130107) + assert bunch.target.shape[0] == 11314 + 7532 + assert bunch.data.dtype == np.float64 + assert bunch.DESCR.startswith(".. 
_20newsgroups_dataset:") + + +def test_20news_normalization(fetch_20newsgroups_vectorized_fxt): + X = fetch_20newsgroups_vectorized_fxt(normalize=False) + X_ = fetch_20newsgroups_vectorized_fxt(normalize=True) + X_norm = X_["data"][:100] + X = X["data"][:100] + + assert_allclose_dense_sparse(X_norm, normalize(X)) + assert np.allclose(np.linalg.norm(X_norm.todense(), axis=1), 1) + + +def test_20news_as_frame(fetch_20newsgroups_vectorized_fxt): + pd = pytest.importorskip("pandas") + + bunch = fetch_20newsgroups_vectorized_fxt(as_frame=True) + check_as_frame(bunch, fetch_20newsgroups_vectorized_fxt) + + frame = bunch.frame + assert frame.shape == (11314, 130108) + assert all([isinstance(col, pd.SparseDtype) for col in bunch.data.dtypes]) + + # Check a small subset of features + for expected_feature in [ + "beginner", + "beginners", + "beginning", + "beginnings", + "begins", + "begley", + "begone", + ]: + assert expected_feature in frame.keys() + assert "category_class" in frame.keys() + assert bunch.target.name == "category_class" + + +def test_as_frame_no_pandas(fetch_20newsgroups_vectorized_fxt, hide_available_pandas): + check_pandas_dependency_message(fetch_20newsgroups_vectorized_fxt) + + +def test_outdated_pickle(fetch_20newsgroups_vectorized_fxt): + with patch("os.path.exists") as mock_is_exist: + with patch("joblib.load") as mock_load: + # mock that the dataset was cached + mock_is_exist.return_value = True + # mock that we have an outdated pickle with only X and y returned + mock_load.return_value = ("X", "y") + err_msg = "The cached dataset located in" + with pytest.raises(ValueError, match=err_msg): + fetch_20newsgroups_vectorized_fxt(as_frame=True) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..b675439cd2e9d1bdd5b1e5105322d9a36a4b4e54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py @@ -0,0 +1,272 @@ +import textwrap +from io import BytesIO + +import pytest + +from sklearn.datasets._arff_parser import ( + _liac_arff_parser, + _pandas_arff_parser, + _post_process_frame, + load_arff_from_gzip_file, +) + + +@pytest.mark.parametrize( + "feature_names, target_names", + [ + ( + [ + "col_int_as_integer", + "col_int_as_numeric", + "col_float_as_real", + "col_float_as_numeric", + ], + ["col_categorical", "col_string"], + ), + ( + [ + "col_int_as_integer", + "col_int_as_numeric", + "col_float_as_real", + "col_float_as_numeric", + ], + ["col_categorical"], + ), + ( + [ + "col_int_as_integer", + "col_int_as_numeric", + "col_float_as_real", + "col_float_as_numeric", + ], + [], + ), + ], +) +def test_post_process_frame(feature_names, target_names): + """Check the behaviour of the post-processing function for splitting a dataframe.""" + pd = pytest.importorskip("pandas") + + X_original = pd.DataFrame( + { + "col_int_as_integer": [1, 2, 3], + "col_int_as_numeric": [1, 2, 3], + "col_float_as_real": [1.0, 2.0, 3.0], + "col_float_as_numeric": [1.0, 2.0, 3.0], + "col_categorical": ["a", "b", "c"], + "col_string": ["a", "b", "c"], + } + ) + + X, y = _post_process_frame(X_original, feature_names, target_names) + assert isinstance(X, pd.DataFrame) + if len(target_names) >= 2: + assert isinstance(y, pd.DataFrame) + elif len(target_names) == 1: + assert isinstance(y, pd.Series) + else: + assert y is None + + +def test_load_arff_from_gzip_file_error_parser(): + """An error 
will be raised if the parser is not known.""" + # None of the input parameters are required to be accurate since the check + # of the parser will be carried out first. + + err_msg = "Unknown parser: 'xxx'. Should be 'liac-arff' or 'pandas'" + with pytest.raises(ValueError, match=err_msg): + load_arff_from_gzip_file("xxx", "xxx", "xxx", "xxx", "xxx", "xxx") + + +@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser]) +def test_pandas_arff_parser_strip_single_quotes(parser_func): + """Check that we properly strip single quotes from the data.""" + pd = pytest.importorskip("pandas") + + arff_file = BytesIO(textwrap.dedent(""" + @relation 'toy' + @attribute 'cat_single_quote' {'A', 'B', 'C'} + @attribute 'str_single_quote' string + @attribute 'str_nested_quote' string + @attribute 'class' numeric + @data + 'A','some text','\"expect double quotes\"',0 + """).encode("utf-8")) + + columns_info = { + "cat_single_quote": { + "data_type": "nominal", + "name": "cat_single_quote", + }, + "str_single_quote": { + "data_type": "string", + "name": "str_single_quote", + }, + "str_nested_quote": { + "data_type": "string", + "name": "str_nested_quote", + }, + "class": { + "data_type": "numeric", + "name": "class", + }, + } + + feature_names = [ + "cat_single_quote", + "str_single_quote", + "str_nested_quote", + ] + target_names = ["class"] + + # We don't strip single quotes for string columns with the pandas parser. + expected_values = { + "cat_single_quote": "A", + "str_single_quote": ( + "some text" if parser_func is _liac_arff_parser else "'some text'" + ), + "str_nested_quote": ( + '"expect double quotes"' + if parser_func is _liac_arff_parser + else "'\"expect double quotes\"'" + ), + "class": 0, + } + + _, _, frame, _ = parser_func( + arff_file, + output_arrays_type="pandas", + openml_columns_info=columns_info, + feature_names_to_select=feature_names, + target_names_to_select=target_names, + ) + + assert frame.columns.tolist() == feature_names + target_names + pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0)) + + +@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser]) +def test_pandas_arff_parser_strip_double_quotes(parser_func): + """Check that we properly strip double quotes from the data.""" + pd = pytest.importorskip("pandas") + + arff_file = BytesIO(textwrap.dedent(""" + @relation 'toy' + @attribute 'cat_double_quote' {"A", "B", "C"} + @attribute 'str_double_quote' string + @attribute 'str_nested_quote' string + @attribute 'class' numeric + @data + "A","some text","\'expect double quotes\'",0 + """).encode("utf-8")) + + columns_info = { + "cat_double_quote": { + "data_type": "nominal", + "name": "cat_double_quote", + }, + "str_double_quote": { + "data_type": "string", + "name": "str_double_quote", + }, + "str_nested_quote": { + "data_type": "string", + "name": "str_nested_quote", + }, + "class": { + "data_type": "numeric", + "name": "class", + }, + } + + feature_names = [ + "cat_double_quote", + "str_double_quote", + "str_nested_quote", + ] + target_names = ["class"] + + expected_values = { + "cat_double_quote": "A", + "str_double_quote": "some text", + "str_nested_quote": "'expect double quotes'", + "class": 0, + } + + _, _, frame, _ = parser_func( + arff_file, + output_arrays_type="pandas", + openml_columns_info=columns_info, + feature_names_to_select=feature_names, + target_names_to_select=target_names, + ) + + assert frame.columns.tolist() == feature_names + target_names + 
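+    # Unlike the single-quote test above, no parser-specific branching is needed
+    # here: the liac-arff and pandas parsers agree on double-quoted values.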
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0)) + + +@pytest.mark.parametrize( + "parser_func", + [ + # internal quotes are not considered to follow the ARFF spec in LIAC ARFF + pytest.param(_liac_arff_parser, marks=pytest.mark.xfail), + _pandas_arff_parser, + ], +) +def test_pandas_arff_parser_strip_no_quotes(parser_func): + """Check that we properly parse with no quotes characters.""" + pd = pytest.importorskip("pandas") + + arff_file = BytesIO(textwrap.dedent(""" + @relation 'toy' + @attribute 'cat_without_quote' {A, B, C} + @attribute 'str_without_quote' string + @attribute 'str_internal_quote' string + @attribute 'class' numeric + @data + A,some text,'internal' quote,0 + """).encode("utf-8")) + + columns_info = { + "cat_without_quote": { + "data_type": "nominal", + "name": "cat_without_quote", + }, + "str_without_quote": { + "data_type": "string", + "name": "str_without_quote", + }, + "str_internal_quote": { + "data_type": "string", + "name": "str_internal_quote", + }, + "class": { + "data_type": "numeric", + "name": "class", + }, + } + + feature_names = [ + "cat_without_quote", + "str_without_quote", + "str_internal_quote", + ] + target_names = ["class"] + + expected_values = { + "cat_without_quote": "A", + "str_without_quote": "some text", + "str_internal_quote": "'internal' quote", + "class": 0, + } + + _, _, frame, _ = parser_func( + arff_file, + output_arrays_type="pandas", + openml_columns_info=columns_info, + feature_names_to_select=feature_names, + target_names_to_select=target_names, + ) + + assert frame.columns.tolist() == feature_names + target_names + pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0)) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_base.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1190060a0555f822f3bd4736e22849a773a52b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_base.py @@ -0,0 +1,365 @@ +import os +import shutil +import tempfile +import warnings +from functools import partial +from importlib import resources +from pathlib import Path +from pickle import dumps, loads + +import numpy as np +import pytest + +from sklearn.datasets import ( + clear_data_home, + get_data_home, + load_breast_cancer, + load_diabetes, + load_digits, + load_files, + load_iris, + load_linnerud, + load_sample_image, + load_sample_images, + load_wine, +) +from sklearn.datasets._base import ( + load_csv_data, + load_gzip_compressed_csv_data, +) +from sklearn.datasets.tests.test_common import check_as_frame +from sklearn.preprocessing import scale +from sklearn.utils import Bunch + + +class _DummyPath: + """Minimal class that implements the os.PathLike interface.""" + + def __init__(self, path): + self.path = path + + def __fspath__(self): + return self.path + + +def _remove_dir(path): + if os.path.isdir(path): + shutil.rmtree(path) + + +@pytest.fixture(scope="module") +def data_home(tmpdir_factory): + tmp_file = str(tmpdir_factory.mktemp("scikit_learn_data_home_test")) + yield tmp_file + _remove_dir(tmp_file) + + +@pytest.fixture(scope="module") +def load_files_root(tmpdir_factory): + tmp_file = str(tmpdir_factory.mktemp("scikit_learn_load_files_test")) + yield tmp_file + _remove_dir(tmp_file) + + +@pytest.fixture +def test_category_dir_1(load_files_root): + test_category_dir1 = tempfile.mkdtemp(dir=load_files_root) + sample_file = 
tempfile.NamedTemporaryFile(dir=test_category_dir1, delete=False) + sample_file.write(b"Hello World!\n") + sample_file.close() + yield str(test_category_dir1) + _remove_dir(test_category_dir1) + + +@pytest.fixture +def test_category_dir_2(load_files_root): + test_category_dir2 = tempfile.mkdtemp(dir=load_files_root) + yield str(test_category_dir2) + _remove_dir(test_category_dir2) + + +@pytest.mark.parametrize("path_container", [None, Path, _DummyPath]) +def test_data_home(path_container, data_home): + # get_data_home will point to a pre-existing folder + if path_container is not None: + data_home = path_container(data_home) + data_home = get_data_home(data_home=data_home) + assert data_home == data_home + assert os.path.exists(data_home) + + # clear_data_home will delete both the content and the folder it-self + if path_container is not None: + data_home = path_container(data_home) + clear_data_home(data_home=data_home) + assert not os.path.exists(data_home) + + # if the folder is missing it will be created again + data_home = get_data_home(data_home=data_home) + assert os.path.exists(data_home) + + +def test_default_empty_load_files(load_files_root): + res = load_files(load_files_root) + assert len(res.filenames) == 0 + assert len(res.target_names) == 0 + assert res.DESCR is None + + +def test_default_load_files(test_category_dir_1, test_category_dir_2, load_files_root): + res = load_files(load_files_root) + assert len(res.filenames) == 1 + assert len(res.target_names) == 2 + assert res.DESCR is None + assert res.data == [b"Hello World!\n"] + + +def test_load_files_w_categories_desc_and_encoding( + test_category_dir_1, test_category_dir_2, load_files_root +): + category = os.path.abspath(test_category_dir_1).split(os.sep).pop() + res = load_files( + load_files_root, description="test", categories=[category], encoding="utf-8" + ) + + assert len(res.filenames) == 1 + assert len(res.target_names) == 1 + assert res.DESCR == "test" + assert res.data == ["Hello World!\n"] + + +def test_load_files_wo_load_content( + test_category_dir_1, test_category_dir_2, load_files_root +): + res = load_files(load_files_root, load_content=False) + assert len(res.filenames) == 1 + assert len(res.target_names) == 2 + assert res.DESCR is None + assert res.get("data") is None + + +@pytest.mark.parametrize("allowed_extensions", ([".txt"], [".txt", ".json"])) +def test_load_files_allowed_extensions(tmp_path, allowed_extensions): + """Check the behaviour of `allowed_extension` in `load_files`.""" + d = tmp_path / "sub" + d.mkdir() + files = ("file1.txt", "file2.json", "file3.json", "file4.md") + paths = [d / f for f in files] + for p in paths: + p.write_bytes(b"hello") + res = load_files(tmp_path, allowed_extensions=allowed_extensions) + assert set([str(p) for p in paths if p.suffix in allowed_extensions]) == set( + res.filenames + ) + + +@pytest.mark.parametrize( + "filename, expected_n_samples, expected_n_features, expected_target_names", + [ + ("wine_data.csv", 178, 13, ["class_0", "class_1", "class_2"]), + ("iris.csv", 150, 4, ["setosa", "versicolor", "virginica"]), + ("breast_cancer.csv", 569, 30, ["malignant", "benign"]), + ], +) +def test_load_csv_data( + filename, expected_n_samples, expected_n_features, expected_target_names +): + actual_data, actual_target, actual_target_names = load_csv_data(filename) + assert actual_data.shape[0] == expected_n_samples + assert actual_data.shape[1] == expected_n_features + assert actual_target.shape[0] == expected_n_samples + 
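+    # load_csv_data reads the small CSV files bundled with scikit-learn itself,
+    # so these shape checks run offline against the classic toy datasets.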
np.testing.assert_array_equal(actual_target_names, expected_target_names) + + +def test_load_csv_data_with_descr(): + data_file_name = "iris.csv" + descr_file_name = "iris.rst" + + res_without_descr = load_csv_data(data_file_name=data_file_name) + res_with_descr = load_csv_data( + data_file_name=data_file_name, descr_file_name=descr_file_name + ) + assert len(res_with_descr) == 4 + assert len(res_without_descr) == 3 + + np.testing.assert_array_equal(res_with_descr[0], res_without_descr[0]) + np.testing.assert_array_equal(res_with_descr[1], res_without_descr[1]) + np.testing.assert_array_equal(res_with_descr[2], res_without_descr[2]) + + assert res_with_descr[-1].startswith(".. _iris_dataset:") + + +@pytest.mark.parametrize( + "filename, kwargs, expected_shape", + [ + ("diabetes_data_raw.csv.gz", {}, [442, 10]), + ("diabetes_target.csv.gz", {}, [442]), + ("digits.csv.gz", {"delimiter": ","}, [1797, 65]), + ], +) +def test_load_gzip_compressed_csv_data(filename, kwargs, expected_shape): + actual_data = load_gzip_compressed_csv_data(filename, **kwargs) + assert actual_data.shape == tuple(expected_shape) + + +def test_load_gzip_compressed_csv_data_with_descr(): + data_file_name = "diabetes_target.csv.gz" + descr_file_name = "diabetes.rst" + + expected_data = load_gzip_compressed_csv_data(data_file_name=data_file_name) + actual_data, descr = load_gzip_compressed_csv_data( + data_file_name=data_file_name, + descr_file_name=descr_file_name, + ) + + np.testing.assert_array_equal(actual_data, expected_data) + assert descr.startswith(".. _diabetes_dataset:") + + +def test_load_sample_images(): + try: + res = load_sample_images() + assert len(res.images) == 2 + assert len(res.filenames) == 2 + images = res.images + + # assert is china image + assert np.all(images[0][0, 0, :] == np.array([174, 201, 231], dtype=np.uint8)) + # assert is flower image + assert np.all(images[1][0, 0, :] == np.array([2, 19, 13], dtype=np.uint8)) + assert res.DESCR + except ImportError: + warnings.warn("Could not load sample images, PIL is not available.") + + +def test_load_sample_image(): + try: + china = load_sample_image("china.jpg") + assert china.dtype == "uint8" + assert china.shape == (427, 640, 3) + except ImportError: + warnings.warn("Could not load sample images, PIL is not available.") + + +def test_load_diabetes_raw(): + """Test to check that we load a scaled version by default but that we can + get an unscaled version when setting `scaled=False`.""" + diabetes_raw = load_diabetes(scaled=False) + assert diabetes_raw.data.shape == (442, 10) + assert diabetes_raw.target.size, 442 + assert len(diabetes_raw.feature_names) == 10 + assert diabetes_raw.DESCR + + diabetes_default = load_diabetes() + + np.testing.assert_allclose( + scale(diabetes_raw.data) / (442**0.5), diabetes_default.data, atol=1e-04 + ) + + +@pytest.mark.parametrize( + "loader_func, data_shape, target_shape, n_target, has_descr, filenames", + [ + (load_breast_cancer, (569, 30), (569,), 2, True, ["filename"]), + (load_wine, (178, 13), (178,), 3, True, []), + (load_iris, (150, 4), (150,), 3, True, ["filename"]), + ( + load_linnerud, + (20, 3), + (20, 3), + 3, + True, + ["data_filename", "target_filename"], + ), + (load_diabetes, (442, 10), (442,), None, True, []), + (load_digits, (1797, 64), (1797,), 10, True, []), + (partial(load_digits, n_class=9), (1617, 64), (1617,), 10, True, []), + ], +) +def test_loader(loader_func, data_shape, target_shape, n_target, has_descr, filenames): + bunch = loader_func() + + assert isinstance(bunch, Bunch) + assert 
bunch.data.shape == data_shape + assert bunch.target.shape == target_shape + if hasattr(bunch, "feature_names"): + assert len(bunch.feature_names) == data_shape[1] + if n_target is not None: + assert len(bunch.target_names) == n_target + if has_descr: + assert bunch.DESCR + if filenames: + assert "data_module" in bunch + assert all( + [ + f in bunch + and (resources.files(bunch["data_module"]) / bunch[f]).is_file() + for f in filenames + ] + ) + + +@pytest.mark.parametrize( + "loader_func, data_dtype, target_dtype", + [ + (load_breast_cancer, np.float64, int), + (load_diabetes, np.float64, np.float64), + (load_digits, np.float64, int), + (load_iris, np.float64, int), + (load_linnerud, np.float64, np.float64), + (load_wine, np.float64, int), + ], +) +def test_toy_dataset_frame_dtype(loader_func, data_dtype, target_dtype): + default_result = loader_func() + check_as_frame( + default_result, + loader_func, + expected_data_dtype=data_dtype, + expected_target_dtype=target_dtype, + ) + + +def test_loads_dumps_bunch(): + bunch = Bunch(x="x") + bunch_from_pkl = loads(dumps(bunch)) + bunch_from_pkl.x = "y" + assert bunch_from_pkl["x"] == bunch_from_pkl.x + + +def test_bunch_pickle_generated_with_0_16_and_read_with_0_17(): + bunch = Bunch(key="original") + # This reproduces a problem when Bunch pickles have been created + # with scikit-learn 0.16 and are read with 0.17. Basically there + # is a surprising behaviour because reading bunch.key uses + # bunch.__dict__ (which is non empty for 0.16 Bunch objects) + # whereas assigning into bunch.key uses bunch.__setattr__. See + # https://github.com/scikit-learn/scikit-learn/issues/6196 for + # more details + bunch.__dict__["key"] = "set from __dict__" + bunch_from_pkl = loads(dumps(bunch)) + # After loading from pickle the __dict__ should have been ignored + assert bunch_from_pkl.key == "original" + assert bunch_from_pkl["key"] == "original" + # Making sure that changing the attr does change the value + # associated with __getitem__ as well + bunch_from_pkl.key = "changed" + assert bunch_from_pkl.key == "changed" + assert bunch_from_pkl["key"] == "changed" + + +def test_bunch_dir(): + # check that dir (important for autocomplete) shows attributes + data = load_iris() + assert "data" in dir(data) + + +def test_load_boston_error(): + """Check that we raise the ethical warning when trying to import `load_boston`.""" + msg = "The Boston housing prices dataset has an ethical problem" + with pytest.raises(ImportError, match=msg): + from sklearn.datasets import load_boston # noqa + + # other non-existing function should raise the usual import error + msg = "cannot import name 'non_existing_function' from 'sklearn.datasets'" + with pytest.raises(ImportError, match=msg): + from sklearn.datasets import non_existing_function # noqa diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_california_housing.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_california_housing.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6fc95db80bfe46bb712113474ebb6bde4d3912 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_california_housing.py @@ -0,0 +1,37 @@ +"""Test the california_housing loader, if the data is available, +or if specifically requested via environment variable +(e.g. 
for CI jobs).""" +from functools import partial + +import pytest + +from sklearn.datasets.tests.test_common import check_return_X_y + + +def test_fetch(fetch_california_housing_fxt): + data = fetch_california_housing_fxt() + assert (20640, 8) == data.data.shape + assert (20640,) == data.target.shape + assert data.DESCR.startswith(".. _california_housing_dataset:") + + # test return_X_y option + fetch_func = partial(fetch_california_housing_fxt) + check_return_X_y(data, fetch_func) + + +def test_fetch_asframe(fetch_california_housing_fxt): + pd = pytest.importorskip("pandas") + bunch = fetch_california_housing_fxt(as_frame=True) + frame = bunch.frame + assert hasattr(bunch, "frame") is True + assert frame.shape == (20640, 9) + assert isinstance(bunch.data, pd.DataFrame) + assert isinstance(bunch.target, pd.Series) + + +def test_pandas_dependency_message(fetch_california_housing_fxt, hide_available_pandas): + # Check that pandas is imported lazily and that an informative error + # message is raised when pandas is missing: + expected_msg = "fetch_california_housing with as_frame=True requires pandas" + with pytest.raises(ImportError, match=expected_msg): + fetch_california_housing_fxt(as_frame=True) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_common.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..8048a31041ddcc4926649ad8225fc11954e0eb57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_common.py @@ -0,0 +1,135 @@ +"""Test loaders for common functionality.""" +import inspect +import os + +import numpy as np +import pytest + +import sklearn.datasets + + +def is_pillow_installed(): + try: + import PIL # noqa + + return True + except ImportError: + return False + + +FETCH_PYTEST_MARKERS = { + "return_X_y": { + "fetch_20newsgroups": pytest.mark.xfail( + reason="X is a list and does not have a shape argument" + ), + "fetch_openml": pytest.mark.xfail( + reason="fetch_opeml requires a dataset name or id" + ), + "fetch_lfw_people": pytest.mark.skipif( + not is_pillow_installed(), reason="pillow is not installed" + ), + }, + "as_frame": { + "fetch_openml": pytest.mark.xfail( + reason="fetch_opeml requires a dataset name or id" + ), + }, +} + + +def check_pandas_dependency_message(fetch_func): + try: + import pandas # noqa + + pytest.skip("This test requires pandas to not be installed") + except ImportError: + # Check that pandas is imported lazily and that an informative error + # message is raised when pandas is missing: + name = fetch_func.__name__ + expected_msg = f"{name} with as_frame=True requires pandas" + with pytest.raises(ImportError, match=expected_msg): + fetch_func(as_frame=True) + + +def check_return_X_y(bunch, dataset_func): + X_y_tuple = dataset_func(return_X_y=True) + assert isinstance(X_y_tuple, tuple) + assert X_y_tuple[0].shape == bunch.data.shape + assert X_y_tuple[1].shape == bunch.target.shape + + +def check_as_frame( + bunch, dataset_func, expected_data_dtype=None, expected_target_dtype=None +): + pd = pytest.importorskip("pandas") + frame_bunch = dataset_func(as_frame=True) + assert hasattr(frame_bunch, "frame") + assert isinstance(frame_bunch.frame, pd.DataFrame) + assert isinstance(frame_bunch.data, pd.DataFrame) + assert frame_bunch.data.shape == bunch.data.shape + if frame_bunch.target.ndim > 1: + assert isinstance(frame_bunch.target, pd.DataFrame) + else: + assert isinstance(frame_bunch.target, pd.Series) + assert 
frame_bunch.target.shape[0] == bunch.target.shape[0] + if expected_data_dtype is not None: + assert np.all(frame_bunch.data.dtypes == expected_data_dtype) + if expected_target_dtype is not None: + assert np.all(frame_bunch.target.dtypes == expected_target_dtype) + + # Test for return_X_y and as_frame=True + frame_X, frame_y = dataset_func(as_frame=True, return_X_y=True) + assert isinstance(frame_X, pd.DataFrame) + if frame_y.ndim > 1: + assert isinstance(frame_X, pd.DataFrame) + else: + assert isinstance(frame_y, pd.Series) + + +def _skip_network_tests(): + return os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "1" + + +def _generate_func_supporting_param(param, dataset_type=("load", "fetch")): + markers_fetch = FETCH_PYTEST_MARKERS.get(param, {}) + for name, obj in inspect.getmembers(sklearn.datasets): + if not inspect.isfunction(obj): + continue + + is_dataset_type = any([name.startswith(t) for t in dataset_type]) + is_support_param = param in inspect.signature(obj).parameters + if is_dataset_type and is_support_param: + # check if we should skip if we don't have network support + marks = [ + pytest.mark.skipif( + condition=name.startswith("fetch") and _skip_network_tests(), + reason="Skip because fetcher requires internet network", + ) + ] + if name in markers_fetch: + marks.append(markers_fetch[name]) + + yield pytest.param(name, obj, marks=marks) + + +@pytest.mark.parametrize( + "name, dataset_func", _generate_func_supporting_param("return_X_y") +) +def test_common_check_return_X_y(name, dataset_func): + bunch = dataset_func() + check_return_X_y(bunch, dataset_func) + + +@pytest.mark.parametrize( + "name, dataset_func", _generate_func_supporting_param("as_frame") +) +def test_common_check_as_frame(name, dataset_func): + bunch = dataset_func() + check_as_frame(bunch, dataset_func) + + +@pytest.mark.parametrize( + "name, dataset_func", _generate_func_supporting_param("as_frame") +) +def test_common_check_pandas_dependency(name, dataset_func): + check_pandas_dependency_message(dataset_func) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py new file mode 100644 index 0000000000000000000000000000000000000000..e44fdaae69ec3ec7f1c7bc8c77fc1f6a15d5f331 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py @@ -0,0 +1,54 @@ +"""Test the covtype loader, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs).""" +from functools import partial + +import pytest + +from sklearn.datasets.tests.test_common import check_return_X_y + + +def test_fetch(fetch_covtype_fxt, global_random_seed): + data1 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed) + data2 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed + 1) + + X1, X2 = data1["data"], data2["data"] + assert (581012, 54) == X1.shape + assert X1.shape == X2.shape + + assert X1.sum() == X2.sum() + + y1, y2 = data1["target"], data2["target"] + assert (X1.shape[0],) == y1.shape + assert (X1.shape[0],) == y2.shape + + descr_prefix = ".. 
_covtype_dataset:" + assert data1.DESCR.startswith(descr_prefix) + assert data2.DESCR.startswith(descr_prefix) + + # test return_X_y option + fetch_func = partial(fetch_covtype_fxt) + check_return_X_y(data1, fetch_func) + + +def test_fetch_asframe(fetch_covtype_fxt): + pytest.importorskip("pandas") + + bunch = fetch_covtype_fxt(as_frame=True) + assert hasattr(bunch, "frame") + frame = bunch.frame + assert frame.shape == (581012, 55) + assert bunch.data.shape == (581012, 54) + assert bunch.target.shape == (581012,) + + column_names = set(frame.columns) + + # enumerated names are added correctly + assert set(f"Wilderness_Area_{i}" for i in range(4)) < column_names + assert set(f"Soil_Type_{i}" for i in range(40)) < column_names + + +def test_pandas_dependency_message(fetch_covtype_fxt, hide_available_pandas): + expected_msg = "fetch_covtype with as_frame=True requires pandas" + with pytest.raises(ImportError, match=expected_msg): + fetch_covtype_fxt(as_frame=True) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py new file mode 100644 index 0000000000000000000000000000000000000000..5f6e9c83a30b8d419880f3d15fffb0fe83f2b559 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py @@ -0,0 +1,89 @@ +"""Test kddcup99 loader, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs). + +Only 'percent10' mode is tested, as the full data +is too big to use in unit-testing. +""" + +from functools import partial + +import pytest + +from sklearn.datasets.tests.test_common import ( + check_as_frame, + check_pandas_dependency_message, + check_return_X_y, +) + + +@pytest.mark.parametrize("as_frame", [True, False]) +@pytest.mark.parametrize( + "subset, n_samples, n_features", + [ + (None, 494021, 41), + ("SA", 100655, 41), + ("SF", 73237, 4), + ("http", 58725, 3), + ("smtp", 9571, 3), + ], +) +def test_fetch_kddcup99_percent10( + fetch_kddcup99_fxt, as_frame, subset, n_samples, n_features +): + data = fetch_kddcup99_fxt(subset=subset, as_frame=as_frame) + assert data.data.shape == (n_samples, n_features) + assert data.target.shape == (n_samples,) + if as_frame: + assert data.frame.shape == (n_samples, n_features + 1) + assert data.DESCR.startswith(".. 
_kddcup99_dataset:") + + +def test_fetch_kddcup99_return_X_y(fetch_kddcup99_fxt): + fetch_func = partial(fetch_kddcup99_fxt, subset="smtp") + data = fetch_func() + check_return_X_y(data, fetch_func) + + +def test_fetch_kddcup99_as_frame(fetch_kddcup99_fxt): + bunch = fetch_kddcup99_fxt() + check_as_frame(bunch, fetch_kddcup99_fxt) + + +def test_fetch_kddcup99_shuffle(fetch_kddcup99_fxt): + dataset = fetch_kddcup99_fxt( + random_state=0, + subset="SA", + percent10=True, + ) + dataset_shuffled = fetch_kddcup99_fxt( + random_state=0, + subset="SA", + shuffle=True, + percent10=True, + ) + assert set(dataset["target"]) == set(dataset_shuffled["target"]) + assert dataset_shuffled.data.shape == dataset.data.shape + assert dataset_shuffled.target.shape == dataset.target.shape + + +def test_pandas_dependency_message(fetch_kddcup99_fxt, hide_available_pandas): + check_pandas_dependency_message(fetch_kddcup99_fxt) + + +def test_corrupted_file_error_message(fetch_kddcup99_fxt, tmp_path): + """Check that a nice error message is raised when cache is corrupted.""" + kddcup99_dir = tmp_path / "kddcup99_10-py3" + kddcup99_dir.mkdir() + samples_path = kddcup99_dir / "samples" + + with samples_path.open("wb") as f: + f.write(b"THIS IS CORRUPTED") + + msg = ( + "The cache for fetch_kddcup99 is invalid, please " + f"delete {str(kddcup99_dir)} and run the fetch_kddcup99 again" + ) + + with pytest.raises(OSError, match=msg): + fetch_kddcup99_fxt(data_home=str(tmp_path)) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py new file mode 100644 index 0000000000000000000000000000000000000000..92edb99ce3b0b0a158c74f64812aaa997e7b36dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py @@ -0,0 +1,241 @@ +"""This test for the LFW require medium-size data downloading and processing + +If the data has not been already downloaded by running the examples, +the tests won't run (skipped). + +If the test are run, the first execution will be long (typically a bit +more than a couple of minutes) but as the dataset loader is leveraging +joblib, successive runs will be fast (less than 200ms). 
+""" + +import os +import random +import shutil +import tempfile +from functools import partial + +import numpy as np +import pytest + +from sklearn.datasets import fetch_lfw_pairs, fetch_lfw_people +from sklearn.datasets.tests.test_common import check_return_X_y +from sklearn.utils._testing import assert_array_equal + +SCIKIT_LEARN_DATA = None +SCIKIT_LEARN_EMPTY_DATA = None +LFW_HOME = None + +FAKE_NAMES = [ + "Abdelatif_Smith", + "Abhati_Kepler", + "Camara_Alvaro", + "Chen_Dupont", + "John_Lee", + "Lin_Bauman", + "Onur_Lopez", +] + + +def setup_module(): + """Test fixture run once and common to all tests of this module""" + Image = pytest.importorskip("PIL.Image") + + global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME + + SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_") + LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, "lfw_home") + + SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_") + + if not os.path.exists(LFW_HOME): + os.makedirs(LFW_HOME) + + random_state = random.Random(42) + np_rng = np.random.RandomState(42) + + # generate some random jpeg files for each person + counts = {} + for name in FAKE_NAMES: + folder_name = os.path.join(LFW_HOME, "lfw_funneled", name) + if not os.path.exists(folder_name): + os.makedirs(folder_name) + + n_faces = np_rng.randint(1, 5) + counts[name] = n_faces + for i in range(n_faces): + file_path = os.path.join(folder_name, name + "_%04d.jpg" % i) + uniface = np_rng.randint(0, 255, size=(250, 250, 3)) + img = Image.fromarray(uniface.astype(np.uint8)) + img.save(file_path) + + # add some random file pollution to test robustness + with open(os.path.join(LFW_HOME, "lfw_funneled", ".test.swp"), "wb") as f: + f.write(b"Text file to be ignored by the dataset loader.") + + # generate some pairing metadata files using the same format as LFW + with open(os.path.join(LFW_HOME, "pairsDevTrain.txt"), "wb") as f: + f.write(b"10\n") + more_than_two = [name for name, count in counts.items() if count >= 2] + for i in range(5): + name = random_state.choice(more_than_two) + first, second = random_state.sample(range(counts[name]), 2) + f.write(("%s\t%d\t%d\n" % (name, first, second)).encode()) + + for i in range(5): + first_name, second_name = random_state.sample(FAKE_NAMES, 2) + first_index = np_rng.choice(np.arange(counts[first_name])) + second_index = np_rng.choice(np.arange(counts[second_name])) + f.write( + ( + "%s\t%d\t%s\t%d\n" + % (first_name, first_index, second_name, second_index) + ).encode() + ) + + with open(os.path.join(LFW_HOME, "pairsDevTest.txt"), "wb") as f: + f.write(b"Fake place holder that won't be tested") + + with open(os.path.join(LFW_HOME, "pairs.txt"), "wb") as f: + f.write(b"Fake place holder that won't be tested") + + +def teardown_module(): + """Test fixture (clean up) run once after all tests of this module""" + if os.path.isdir(SCIKIT_LEARN_DATA): + shutil.rmtree(SCIKIT_LEARN_DATA) + if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA): + shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA) + + +def test_load_empty_lfw_people(): + with pytest.raises(OSError): + fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False) + + +def test_load_fake_lfw_people(): + lfw_people = fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, min_faces_per_person=3, download_if_missing=False + ) + + # The data is croped around the center as a rectangular bounding box + # around the face. 
Colors are converted to gray levels: + assert lfw_people.images.shape == (10, 62, 47) + assert lfw_people.data.shape == (10, 2914) + + # the target is array of person integer ids + assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2]) + + # names of the persons can be found using the target_names array + expected_classes = ["Abdelatif Smith", "Abhati Kepler", "Onur Lopez"] + assert_array_equal(lfw_people.target_names, expected_classes) + + # It is possible to ask for the original data without any croping or color + # conversion and not limit on the number of picture per person + lfw_people = fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, + resize=None, + slice_=None, + color=True, + download_if_missing=False, + ) + assert lfw_people.images.shape == (17, 250, 250, 3) + assert lfw_people.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:") + + # the ids and class names are the same as previously + assert_array_equal( + lfw_people.target, [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2] + ) + assert_array_equal( + lfw_people.target_names, + [ + "Abdelatif Smith", + "Abhati Kepler", + "Camara Alvaro", + "Chen Dupont", + "John Lee", + "Lin Bauman", + "Onur Lopez", + ], + ) + + # test return_X_y option + fetch_func = partial( + fetch_lfw_people, + data_home=SCIKIT_LEARN_DATA, + resize=None, + slice_=None, + color=True, + download_if_missing=False, + ) + check_return_X_y(lfw_people, fetch_func) + + +def test_load_fake_lfw_people_too_restrictive(): + with pytest.raises(ValueError): + fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, + min_faces_per_person=100, + download_if_missing=False, + ) + + +def test_load_empty_lfw_pairs(): + with pytest.raises(OSError): + fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False) + + +def test_load_fake_lfw_pairs(): + lfw_pairs_train = fetch_lfw_pairs( + data_home=SCIKIT_LEARN_DATA, download_if_missing=False + ) + + # The data is croped around the center as a rectangular bounding box + # around the face. Colors are converted to gray levels: + assert lfw_pairs_train.pairs.shape == (10, 2, 62, 47) + + # the target is whether the person is the same or not + assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) + + # names of the persons can be found using the target_names array + expected_classes = ["Different persons", "Same person"] + assert_array_equal(lfw_pairs_train.target_names, expected_classes) + + # It is possible to ask for the original data without any croping or color + # conversion + lfw_pairs_train = fetch_lfw_pairs( + data_home=SCIKIT_LEARN_DATA, + resize=None, + slice_=None, + color=True, + download_if_missing=False, + ) + assert lfw_pairs_train.pairs.shape == (10, 2, 250, 250, 3) + + # the ids and class names are the same as previously + assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) + assert_array_equal(lfw_pairs_train.target_names, expected_classes) + + assert lfw_pairs_train.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:") + + +def test_fetch_lfw_people_internal_cropping(): + """Check that we properly crop the images. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/24942 + """ + # If cropping was not done properly and we don't resize the images, the images would + # have their original size (250x250) and the image would not fit in the NumPy array + # pre-allocated based on `slice_` parameter. 
+ slice_ = (slice(70, 195), slice(78, 172)) + lfw = fetch_lfw_people( + data_home=SCIKIT_LEARN_DATA, + min_faces_per_person=3, + download_if_missing=False, + resize=None, + slice_=slice_, + ) + assert lfw.images[0].shape == ( + slice_[0].stop - slice_[0].start, + slice_[1].stop - slice_[1].start, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py new file mode 100644 index 0000000000000000000000000000000000000000..e5d6c853aa454ff31dd1edfccee4993c1e133c4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py @@ -0,0 +1,26 @@ +"""Test Olivetti faces fetcher, if the data is available, +or if specifically requested via environment variable +(e.g. for CI jobs).""" + +import numpy as np + +from sklearn.datasets.tests.test_common import check_return_X_y +from sklearn.utils import Bunch +from sklearn.utils._testing import assert_array_equal + + +def test_olivetti_faces(fetch_olivetti_faces_fxt): + data = fetch_olivetti_faces_fxt(shuffle=True, random_state=0) + + assert isinstance(data, Bunch) + for expected_keys in ("data", "images", "target", "DESCR"): + assert expected_keys in data.keys() + + assert data.data.shape == (400, 4096) + assert data.images.shape == (400, 64, 64) + assert data.target.shape == (400,) + assert_array_equal(np.unique(np.sort(data.target)), np.arange(40)) + assert data.DESCR.startswith(".. _olivetti_faces_dataset:") + + # test the return_X_y option + check_return_X_y(data, fetch_olivetti_faces_fxt) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py new file mode 100644 index 0000000000000000000000000000000000000000..3ff2557aa4f9efae21d819507ada5115ec277f0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py @@ -0,0 +1,1657 @@ +"""Test the openml loader.""" +import gzip +import json +import os +import re +from functools import partial +from importlib import resources +from io import BytesIO +from urllib.error import HTTPError + +import numpy as np +import pytest +import scipy.sparse + +import sklearn +from sklearn import config_context +from sklearn.datasets import fetch_openml as fetch_openml_orig +from sklearn.datasets._openml import ( + _OPENML_PREFIX, + _get_local_path, + _open_openml_url, + _retry_with_clean_cache, +) +from sklearn.utils import Bunch, check_pandas_support +from sklearn.utils._testing import ( + SkipTest, + assert_allclose, + assert_array_equal, + fails_if_pypy, +) + +OPENML_TEST_DATA_MODULE = "sklearn.datasets.tests.data.openml" +# if True, urlopen will be monkey patched to only use local files +test_offline = True + + +class _MockHTTPResponse: + def __init__(self, data, is_gzip): + self.data = data + self.is_gzip = is_gzip + + def read(self, amt=-1): + return self.data.read(amt) + + def close(self): + self.data.close() + + def info(self): + if self.is_gzip: + return {"Content-Encoding": "gzip"} + return {} + + def __iter__(self): + return iter(self.data) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False + + +# Disable the disk-based cache when testing `fetch_openml`: +# the mock data in sklearn/datasets/tests/data/openml/ is not always consistent +# with the version on openml.org. 
If one were to load the dataset outside of +# the tests, it may result in data that does not represent openml.org. +fetch_openml = partial(fetch_openml_orig, data_home=None) + + +def _monkey_patch_webbased_functions(context, data_id, gzip_response): + # monkey patches the urlopen function. Important note: Do NOT use this + # in combination with a regular cache directory, as the files that are + # stored as cache should not be mixed up with real openml datasets + url_prefix_data_description = "https://api.openml.org/api/v1/json/data/" + url_prefix_data_features = "https://api.openml.org/api/v1/json/data/features/" + url_prefix_download_data = "https://api.openml.org/data/v1/" + url_prefix_data_list = "https://api.openml.org/api/v1/json/data/list/" + + path_suffix = ".gz" + read_fn = gzip.open + + data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}" + + def _file_name(url, suffix): + output = ( + re.sub(r"\W", "-", url[len("https://api.openml.org/") :]) + + suffix + + path_suffix + ) + # Shorten the filenames to have better compatibility with windows 10 + # and filenames > 260 characters + return ( + output.replace("-json-data-list", "-jdl") + .replace("-json-data-features", "-jdf") + .replace("-json-data-qualities", "-jdq") + .replace("-json-data", "-jd") + .replace("-data_name", "-dn") + .replace("-download", "-dl") + .replace("-limit", "-l") + .replace("-data_version", "-dv") + .replace("-status", "-s") + .replace("-deactivated", "-dact") + .replace("-active", "-act") + ) + + def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix): + assert url.startswith(expected_prefix) + + data_file_name = _file_name(url, suffix) + data_file_path = resources.files(data_module) / data_file_name + + with data_file_path.open("rb") as f: + if has_gzip_header and gzip_response: + fp = BytesIO(f.read()) + return _MockHTTPResponse(fp, True) + else: + decompressed_f = read_fn(f, "rb") + fp = BytesIO(decompressed_f.read()) + return _MockHTTPResponse(fp, False) + + def _mock_urlopen_data_description(url, has_gzip_header): + return _mock_urlopen_shared( + url=url, + has_gzip_header=has_gzip_header, + expected_prefix=url_prefix_data_description, + suffix=".json", + ) + + def _mock_urlopen_data_features(url, has_gzip_header): + return _mock_urlopen_shared( + url=url, + has_gzip_header=has_gzip_header, + expected_prefix=url_prefix_data_features, + suffix=".json", + ) + + def _mock_urlopen_download_data(url, has_gzip_header): + return _mock_urlopen_shared( + url=url, + has_gzip_header=has_gzip_header, + expected_prefix=url_prefix_download_data, + suffix=".arff", + ) + + def _mock_urlopen_data_list(url, has_gzip_header): + assert url.startswith(url_prefix_data_list) + + data_file_name = _file_name(url, ".json") + data_file_path = resources.files(data_module) / data_file_name + + # load the file itself, to simulate a http error + with data_file_path.open("rb") as f: + decompressed_f = read_fn(f, "rb") + decoded_s = decompressed_f.read().decode("utf-8") + json_data = json.loads(decoded_s) + if "error" in json_data: + raise HTTPError( + url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO() + ) + + with data_file_path.open("rb") as f: + if has_gzip_header: + fp = BytesIO(f.read()) + return _MockHTTPResponse(fp, True) + else: + decompressed_f = read_fn(f, "rb") + fp = BytesIO(decompressed_f.read()) + return _MockHTTPResponse(fp, False) + + def _mock_urlopen(request, *args, **kwargs): + url = request.get_full_url() + has_gzip_header = request.get_header("Accept-encoding") == "gzip" 
+ if url.startswith(url_prefix_data_list): + return _mock_urlopen_data_list(url, has_gzip_header) + elif url.startswith(url_prefix_data_features): + return _mock_urlopen_data_features(url, has_gzip_header) + elif url.startswith(url_prefix_download_data): + return _mock_urlopen_download_data(url, has_gzip_header) + elif url.startswith(url_prefix_data_description): + return _mock_urlopen_data_description(url, has_gzip_header) + else: + raise ValueError("Unknown mocking URL pattern: %s" % url) + + # XXX: Global variable + if test_offline: + context.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen) + + +############################################################################### +# Test the behaviour of `fetch_openml` depending of the input parameters. + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize( + "data_id, dataset_params, n_samples, n_features, n_targets", + [ + # iris + (61, {"data_id": 61}, 150, 4, 1), + (61, {"name": "iris", "version": 1}, 150, 4, 1), + # anneal + (2, {"data_id": 2}, 11, 38, 1), + (2, {"name": "anneal", "version": 1}, 11, 38, 1), + # cpu + (561, {"data_id": 561}, 209, 7, 1), + (561, {"name": "cpu", "version": 1}, 209, 7, 1), + # emotions + (40589, {"data_id": 40589}, 13, 72, 6), + # adult-census + (1119, {"data_id": 1119}, 10, 14, 1), + (1119, {"name": "adult-census"}, 10, 14, 1), + # miceprotein + (40966, {"data_id": 40966}, 7, 77, 1), + (40966, {"name": "MiceProtein"}, 7, 77, 1), + # titanic + (40945, {"data_id": 40945}, 1309, 13, 1), + ], +) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_as_frame_true( + monkeypatch, + data_id, + dataset_params, + n_samples, + n_features, + n_targets, + parser, + gzip_response, +): + """Check the behaviour of `fetch_openml` with `as_frame=True`. + + Fetch by ID and/or name (depending if the file was previously cached). + """ + pd = pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response) + bunch = fetch_openml( + as_frame=True, + cache=False, + parser=parser, + **dataset_params, + ) + + assert int(bunch.details["id"]) == data_id + assert isinstance(bunch, Bunch) + + assert isinstance(bunch.frame, pd.DataFrame) + assert bunch.frame.shape == (n_samples, n_features + n_targets) + + assert isinstance(bunch.data, pd.DataFrame) + assert bunch.data.shape == (n_samples, n_features) + + if n_targets == 1: + assert isinstance(bunch.target, pd.Series) + assert bunch.target.shape == (n_samples,) + else: + assert isinstance(bunch.target, pd.DataFrame) + assert bunch.target.shape == (n_samples, n_targets) + + assert bunch.categories is None + + +# Known failure of PyPy for OpenML. 
See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize( + "data_id, dataset_params, n_samples, n_features, n_targets", + [ + # iris + (61, {"data_id": 61}, 150, 4, 1), + (61, {"name": "iris", "version": 1}, 150, 4, 1), + # anneal + (2, {"data_id": 2}, 11, 38, 1), + (2, {"name": "anneal", "version": 1}, 11, 38, 1), + # cpu + (561, {"data_id": 561}, 209, 7, 1), + (561, {"name": "cpu", "version": 1}, 209, 7, 1), + # emotions + (40589, {"data_id": 40589}, 13, 72, 6), + # adult-census + (1119, {"data_id": 1119}, 10, 14, 1), + (1119, {"name": "adult-census"}, 10, 14, 1), + # miceprotein + (40966, {"data_id": 40966}, 7, 77, 1), + (40966, {"name": "MiceProtein"}, 7, 77, 1), + ], +) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_as_frame_false( + monkeypatch, + data_id, + dataset_params, + n_samples, + n_features, + n_targets, + parser, +): + """Check the behaviour of `fetch_openml` with `as_frame=False`. + + Fetch both by ID and/or name + version. + """ + pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch = fetch_openml( + as_frame=False, + cache=False, + parser=parser, + **dataset_params, + ) + assert int(bunch.details["id"]) == data_id + assert isinstance(bunch, Bunch) + + assert bunch.frame is None + + assert isinstance(bunch.data, np.ndarray) + assert bunch.data.shape == (n_samples, n_features) + + assert isinstance(bunch.target, np.ndarray) + if n_targets == 1: + assert bunch.target.shape == (n_samples,) + else: + assert bunch.target.shape == (n_samples, n_targets) + + assert isinstance(bunch.categories, dict) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("data_id", [61, 1119, 40945]) +def test_fetch_openml_consistency_parser(monkeypatch, data_id): + """Check the consistency of the LIAC-ARFF and pandas parsers.""" + pd = pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch_liac = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser="liac-arff", + ) + bunch_pandas = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser="pandas", + ) + + # The data frames for the input features should match up to some numerical + # dtype conversions (e.g. float64 <=> Int64) due to limitations of the + # LIAC-ARFF parser. + data_liac, data_pandas = bunch_liac.data, bunch_pandas.data + + def convert_numerical_dtypes(series): + pandas_series = data_pandas[series.name] + if pd.api.types.is_numeric_dtype(pandas_series): + return series.astype(pandas_series.dtype) + else: + return series + + data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes) + pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas) + + # Let's also check that the .frame attributes also match + frame_liac, frame_pandas = bunch_liac.frame, bunch_pandas.frame + + # Note that the .frame attribute is a superset of the .data attribute: + pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas) + + # However the remaining columns, typically the target(s), are not necessarily + # dtyped similarly by both parsers due to limitations of the LIAC-ARFF parser. 
+ # Therefore, extra dtype conversions are required for those columns: + + def convert_numerical_and_categorical_dtypes(series): + pandas_series = frame_pandas[series.name] + if pd.api.types.is_numeric_dtype(pandas_series): + return series.astype(pandas_series.dtype) + elif isinstance(pandas_series.dtype, pd.CategoricalDtype): + # Compare categorical features by converting categorical liac uses + # strings to denote the categories, we rename the categories to make + # them comparable to the pandas parser. Fixing this behavior in + # LIAC-ARFF would allow to check the consistency in the future but + # we do not plan to maintain the LIAC-ARFF on the long term. + return series.cat.rename_categories(pandas_series.cat.categories) + else: + return series + + frame_liac_with_fixed_dtypes = frame_liac.apply( + convert_numerical_and_categorical_dtypes + ) + pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser): + """Check the equivalence of the dataset when using `as_frame=False` and + `as_frame=True`. + """ + pytest.importorskip("pandas") + + data_id = 61 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch_as_frame_true = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + + bunch_as_frame_false = fetch_openml( + data_id=data_id, + as_frame=False, + cache=False, + parser=parser, + ) + + assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data) + assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_iris_pandas(monkeypatch, parser): + """Check fetching on a numerical only dataset with string labels.""" + pd = pytest.importorskip("pandas") + CategoricalDtype = pd.api.types.CategoricalDtype + data_id = 61 + data_shape = (150, 4) + target_shape = (150,) + frame_shape = (150, 5) + + target_dtype = CategoricalDtype( + ["Iris-setosa", "Iris-versicolor", "Iris-virginica"] + ) + data_dtypes = [np.float64] * 4 + data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"] + target_name = "class" + + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + + bunch = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + data = bunch.data + target = bunch.target + frame = bunch.frame + + assert isinstance(data, pd.DataFrame) + assert np.all(data.dtypes == data_dtypes) + assert data.shape == data_shape + assert np.all(data.columns == data_names) + assert np.all(bunch.feature_names == data_names) + assert bunch.target_names == [target_name] + + assert isinstance(target, pd.Series) + assert target.dtype == target_dtype + assert target.shape == target_shape + assert target.name == target_name + assert target.index.is_unique + + assert isinstance(frame, pd.DataFrame) + assert frame.shape == frame_shape + assert np.all(frame.dtypes == data_dtypes + [target_dtype]) + assert frame.index.is_unique + + +# Known failure of PyPy for OpenML. 
See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +@pytest.mark.parametrize("target_column", ["petalwidth", ["petalwidth", "petallength"]]) +def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column): + """Check that we can force the target to not be the default target.""" + pd = pytest.importorskip("pandas") + + data_id = 61 + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + bunch_forcing_target = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + target_column=target_column, + parser=parser, + ) + bunch_default = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + + pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame) + if isinstance(target_column, list): + pd.testing.assert_index_equal( + bunch_forcing_target.target.columns, pd.Index(target_column) + ) + assert bunch_forcing_target.data.shape == (150, 3) + else: + assert bunch_forcing_target.target.name == target_column + assert bunch_forcing_target.data.shape == (150, 4) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("data_id", [61, 2, 561, 40589, 1119]) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser): + """Check the behaviour of `return_X_y=True` when `as_frame=True`.""" + pd = pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + return_X_y=False, + parser=parser, + ) + X, y = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + return_X_y=True, + parser=parser, + ) + + pd.testing.assert_frame_equal(bunch.data, X) + if isinstance(y, pd.Series): + pd.testing.assert_series_equal(bunch.target, y) + else: + pd.testing.assert_frame_equal(bunch.target, y) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize("data_id", [61, 561, 40589, 1119]) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_equivalence_array_return_X_y(monkeypatch, data_id, parser): + """Check the behaviour of `return_X_y=True` when `as_frame=False`.""" + pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + bunch = fetch_openml( + data_id=data_id, + as_frame=False, + cache=False, + return_X_y=False, + parser=parser, + ) + X, y = fetch_openml( + data_id=data_id, + as_frame=False, + cache=False, + return_X_y=True, + parser=parser, + ) + + assert_array_equal(bunch.data, X) + assert_array_equal(bunch.target, y) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +def test_fetch_openml_difference_parsers(monkeypatch): + """Check the difference between liac-arff and pandas parser.""" + pytest.importorskip("pandas") + + data_id = 1119 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) + # When `as_frame=False`, the categories will be ordinally encoded with + # liac-arff parser while this is not the case with pandas parser. 
+ as_frame = False + bunch_liac_arff = fetch_openml( + data_id=data_id, + as_frame=as_frame, + cache=False, + parser="liac-arff", + ) + bunch_pandas = fetch_openml( + data_id=data_id, + as_frame=as_frame, + cache=False, + parser="pandas", + ) + + assert bunch_liac_arff.data.dtype.kind == "f" + assert bunch_pandas.data.dtype == "O" + + +############################################################################### +# Test the ARFF parsing on several dataset to check if detect the correct +# types (categories, integers, floats). + + +@pytest.fixture(scope="module") +def datasets_column_names(): + """Returns the columns names for each dataset.""" + return { + 61: ["sepallength", "sepalwidth", "petallength", "petalwidth", "class"], + 2: [ + "family", + "product-type", + "steel", + "carbon", + "hardness", + "temper_rolling", + "condition", + "formability", + "strength", + "non-ageing", + "surface-finish", + "surface-quality", + "enamelability", + "bc", + "bf", + "bt", + "bw%2Fme", + "bl", + "m", + "chrom", + "phos", + "cbond", + "marvi", + "exptl", + "ferro", + "corr", + "blue%2Fbright%2Fvarn%2Fclean", + "lustre", + "jurofm", + "s", + "p", + "shape", + "thick", + "width", + "len", + "oil", + "bore", + "packing", + "class", + ], + 561: ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "class"], + 40589: [ + "Mean_Acc1298_Mean_Mem40_Centroid", + "Mean_Acc1298_Mean_Mem40_Rolloff", + "Mean_Acc1298_Mean_Mem40_Flux", + "Mean_Acc1298_Mean_Mem40_MFCC_0", + "Mean_Acc1298_Mean_Mem40_MFCC_1", + "Mean_Acc1298_Mean_Mem40_MFCC_2", + "Mean_Acc1298_Mean_Mem40_MFCC_3", + "Mean_Acc1298_Mean_Mem40_MFCC_4", + "Mean_Acc1298_Mean_Mem40_MFCC_5", + "Mean_Acc1298_Mean_Mem40_MFCC_6", + "Mean_Acc1298_Mean_Mem40_MFCC_7", + "Mean_Acc1298_Mean_Mem40_MFCC_8", + "Mean_Acc1298_Mean_Mem40_MFCC_9", + "Mean_Acc1298_Mean_Mem40_MFCC_10", + "Mean_Acc1298_Mean_Mem40_MFCC_11", + "Mean_Acc1298_Mean_Mem40_MFCC_12", + "Mean_Acc1298_Std_Mem40_Centroid", + "Mean_Acc1298_Std_Mem40_Rolloff", + "Mean_Acc1298_Std_Mem40_Flux", + "Mean_Acc1298_Std_Mem40_MFCC_0", + "Mean_Acc1298_Std_Mem40_MFCC_1", + "Mean_Acc1298_Std_Mem40_MFCC_2", + "Mean_Acc1298_Std_Mem40_MFCC_3", + "Mean_Acc1298_Std_Mem40_MFCC_4", + "Mean_Acc1298_Std_Mem40_MFCC_5", + "Mean_Acc1298_Std_Mem40_MFCC_6", + "Mean_Acc1298_Std_Mem40_MFCC_7", + "Mean_Acc1298_Std_Mem40_MFCC_8", + "Mean_Acc1298_Std_Mem40_MFCC_9", + "Mean_Acc1298_Std_Mem40_MFCC_10", + "Mean_Acc1298_Std_Mem40_MFCC_11", + "Mean_Acc1298_Std_Mem40_MFCC_12", + "Std_Acc1298_Mean_Mem40_Centroid", + "Std_Acc1298_Mean_Mem40_Rolloff", + "Std_Acc1298_Mean_Mem40_Flux", + "Std_Acc1298_Mean_Mem40_MFCC_0", + "Std_Acc1298_Mean_Mem40_MFCC_1", + "Std_Acc1298_Mean_Mem40_MFCC_2", + "Std_Acc1298_Mean_Mem40_MFCC_3", + "Std_Acc1298_Mean_Mem40_MFCC_4", + "Std_Acc1298_Mean_Mem40_MFCC_5", + "Std_Acc1298_Mean_Mem40_MFCC_6", + "Std_Acc1298_Mean_Mem40_MFCC_7", + "Std_Acc1298_Mean_Mem40_MFCC_8", + "Std_Acc1298_Mean_Mem40_MFCC_9", + "Std_Acc1298_Mean_Mem40_MFCC_10", + "Std_Acc1298_Mean_Mem40_MFCC_11", + "Std_Acc1298_Mean_Mem40_MFCC_12", + "Std_Acc1298_Std_Mem40_Centroid", + "Std_Acc1298_Std_Mem40_Rolloff", + "Std_Acc1298_Std_Mem40_Flux", + "Std_Acc1298_Std_Mem40_MFCC_0", + "Std_Acc1298_Std_Mem40_MFCC_1", + "Std_Acc1298_Std_Mem40_MFCC_2", + "Std_Acc1298_Std_Mem40_MFCC_3", + "Std_Acc1298_Std_Mem40_MFCC_4", + "Std_Acc1298_Std_Mem40_MFCC_5", + "Std_Acc1298_Std_Mem40_MFCC_6", + "Std_Acc1298_Std_Mem40_MFCC_7", + "Std_Acc1298_Std_Mem40_MFCC_8", + "Std_Acc1298_Std_Mem40_MFCC_9", + "Std_Acc1298_Std_Mem40_MFCC_10", + "Std_Acc1298_Std_Mem40_MFCC_11", 
+ "Std_Acc1298_Std_Mem40_MFCC_12", + "BH_LowPeakAmp", + "BH_LowPeakBPM", + "BH_HighPeakAmp", + "BH_HighPeakBPM", + "BH_HighLowRatio", + "BHSUM1", + "BHSUM2", + "BHSUM3", + "amazed.suprised", + "happy.pleased", + "relaxing.calm", + "quiet.still", + "sad.lonely", + "angry.aggresive", + ], + 1119: [ + "age", + "workclass", + "fnlwgt:", + "education:", + "education-num:", + "marital-status:", + "occupation:", + "relationship:", + "race:", + "sex:", + "capital-gain:", + "capital-loss:", + "hours-per-week:", + "native-country:", + "class", + ], + 40966: [ + "DYRK1A_N", + "ITSN1_N", + "BDNF_N", + "NR1_N", + "NR2A_N", + "pAKT_N", + "pBRAF_N", + "pCAMKII_N", + "pCREB_N", + "pELK_N", + "pERK_N", + "pJNK_N", + "PKCA_N", + "pMEK_N", + "pNR1_N", + "pNR2A_N", + "pNR2B_N", + "pPKCAB_N", + "pRSK_N", + "AKT_N", + "BRAF_N", + "CAMKII_N", + "CREB_N", + "ELK_N", + "ERK_N", + "GSK3B_N", + "JNK_N", + "MEK_N", + "TRKA_N", + "RSK_N", + "APP_N", + "Bcatenin_N", + "SOD1_N", + "MTOR_N", + "P38_N", + "pMTOR_N", + "DSCR1_N", + "AMPKA_N", + "NR2B_N", + "pNUMB_N", + "RAPTOR_N", + "TIAM1_N", + "pP70S6_N", + "NUMB_N", + "P70S6_N", + "pGSK3B_N", + "pPKCG_N", + "CDK5_N", + "S6_N", + "ADARB1_N", + "AcetylH3K9_N", + "RRP1_N", + "BAX_N", + "ARC_N", + "ERBB4_N", + "nNOS_N", + "Tau_N", + "GFAP_N", + "GluR3_N", + "GluR4_N", + "IL1B_N", + "P3525_N", + "pCASP9_N", + "PSD95_N", + "SNCA_N", + "Ubiquitin_N", + "pGSK3B_Tyr216_N", + "SHH_N", + "BAD_N", + "BCL2_N", + "pS6_N", + "pCFOS_N", + "SYP_N", + "H3AcK18_N", + "EGR1_N", + "H3MeK4_N", + "CaNA_N", + "class", + ], + 40945: [ + "pclass", + "survived", + "name", + "sex", + "age", + "sibsp", + "parch", + "ticket", + "fare", + "cabin", + "embarked", + "boat", + "body", + "home.dest", + ], + } + + +@pytest.fixture(scope="module") +def datasets_missing_values(): + return { + 61: {}, + 2: { + "family": 11, + "temper_rolling": 9, + "condition": 2, + "formability": 4, + "non-ageing": 10, + "surface-finish": 11, + "enamelability": 11, + "bc": 11, + "bf": 10, + "bt": 11, + "bw%2Fme": 8, + "bl": 9, + "m": 11, + "chrom": 11, + "phos": 11, + "cbond": 10, + "marvi": 11, + "exptl": 11, + "ferro": 11, + "corr": 11, + "blue%2Fbright%2Fvarn%2Fclean": 11, + "lustre": 8, + "jurofm": 11, + "s": 11, + "p": 11, + "oil": 10, + "packing": 11, + }, + 561: {}, + 40589: {}, + 1119: {}, + 40966: {"BCL2_N": 7}, + 40945: { + "age": 263, + "fare": 1, + "cabin": 1014, + "embarked": 2, + "boat": 823, + "body": 1188, + "home.dest": 564, + }, + } + + +# Known failure of PyPy for OpenML. 
See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize( + "data_id, parser, expected_n_categories, expected_n_floats, expected_n_ints", + [ + # iris dataset + (61, "liac-arff", 1, 4, 0), + (61, "pandas", 1, 4, 0), + # anneal dataset + (2, "liac-arff", 33, 6, 0), + (2, "pandas", 33, 2, 4), + # cpu dataset + (561, "liac-arff", 1, 7, 0), + (561, "pandas", 1, 0, 7), + # emotions dataset + (40589, "liac-arff", 6, 72, 0), + (40589, "pandas", 6, 69, 3), + # adult-census dataset + (1119, "liac-arff", 9, 6, 0), + (1119, "pandas", 9, 0, 6), + # miceprotein + (40966, "liac-arff", 1, 77, 0), + (40966, "pandas", 1, 77, 0), + # titanic + (40945, "liac-arff", 3, 6, 0), + (40945, "pandas", 3, 3, 3), + ], +) +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_types_inference( + monkeypatch, + data_id, + parser, + expected_n_categories, + expected_n_floats, + expected_n_ints, + gzip_response, + datasets_column_names, + datasets_missing_values, +): + """Check that `fetch_openml` infer the right number of categories, integers, and + floats.""" + pd = pytest.importorskip("pandas") + CategoricalDtype = pd.api.types.CategoricalDtype + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response) + + bunch = fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser=parser, + ) + frame = bunch.frame + + n_categories = len( + [dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)] + ) + n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"]) + n_ints = len([dtype for dtype in frame.dtypes if dtype.kind == "i"]) + + assert n_categories == expected_n_categories + assert n_floats == expected_n_floats + assert n_ints == expected_n_ints + + assert frame.columns.tolist() == datasets_column_names[data_id] + + frame_feature_to_n_nan = frame.isna().sum().to_dict() + for name, n_missing in frame_feature_to_n_nan.items(): + expected_missing = datasets_missing_values[data_id].get(name, 0) + assert n_missing == expected_missing + + +############################################################################### +# Test some more specific behaviour + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"parser": "unknown"}, + "The 'parser' parameter of fetch_openml must be a str among", + ), + ( + {"as_frame": "unknown"}, + "The 'as_frame' parameter of fetch_openml must be an instance", + ), + ], +) +def test_fetch_openml_validation_parameter(monkeypatch, params, err_msg): + data_id = 1119 + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + with pytest.raises(ValueError, match=err_msg): + fetch_openml(data_id=data_id, **params) + + +@pytest.mark.parametrize( + "params", + [ + {"as_frame": True, "parser": "auto"}, + {"as_frame": "auto", "parser": "auto"}, + {"as_frame": False, "parser": "pandas"}, + {"as_frame": False, "parser": "auto"}, + ], +) +def test_fetch_openml_requires_pandas_error(monkeypatch, params): + """Check that we raise the proper errors when we require pandas.""" + data_id = 1119 + try: + check_pandas_support("test_fetch_openml_requires_pandas") + except ImportError: + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + err_msg = "requires pandas to be installed. 
Alternatively, explicitly" + with pytest.raises(ImportError, match=err_msg): + fetch_openml(data_id=data_id, **params) + else: + raise SkipTest("This test requires pandas to not be installed.") + + +@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive") +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"parser": "pandas"}, + "Sparse ARFF datasets cannot be loaded with parser='pandas'", + ), + ( + {"as_frame": True}, + "Sparse ARFF datasets cannot be loaded with as_frame=True.", + ), + ( + {"parser": "pandas", "as_frame": True}, + "Sparse ARFF datasets cannot be loaded with as_frame=True.", + ), + ], +) +def test_fetch_openml_sparse_arff_error(monkeypatch, params, err_msg): + """Check that we raise the expected error for sparse ARFF datasets and + a wrong set of incompatible parameters. + """ + pytest.importorskip("pandas") + data_id = 292 + + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + with pytest.raises(ValueError, match=err_msg): + fetch_openml( + data_id=data_id, + cache=False, + **params, + ) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive") +@pytest.mark.parametrize( + "data_id, data_type", + [ + (61, "dataframe"), # iris dataset version 1 + (292, "sparse"), # Australian dataset version 1 + ], +) +def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type): + """Check the auto mode of `fetch_openml`.""" + pd = pytest.importorskip("pandas") + + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + data = fetch_openml(data_id=data_id, as_frame="auto", cache=False) + klass = pd.DataFrame if data_type == "dataframe" else scipy.sparse.csr_matrix + assert isinstance(data.data, klass) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch): + """Check that we raise a warning regarding the working memory when using + LIAC-ARFF parser.""" + pytest.importorskip("pandas") + + data_id = 1119 + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + + msg = "Could not adhere to working_memory config." + with pytest.warns(UserWarning, match=msg): + with config_context(working_memory=1e-6): + fetch_openml( + data_id=data_id, + as_frame=True, + cache=False, + parser="liac-arff", + ) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response): + """Check that a warning is raised when multiple versions exist and no version is + requested.""" + data_id = 61 + data_name = "iris" + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + + msg = re.escape( + "Multiple active versions of the dataset matching the name" + " iris exist. Versions may be fundamentally different, " + "returning version 1. 
Available versions:\n" + "- version 1, status: active\n" + " url: https://www.openml.org/search?type=data&id=61\n" + "- version 3, status: active\n" + " url: https://www.openml.org/search?type=data&id=969\n" + ) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + name=data_name, + as_frame=False, + cache=False, + parser="liac-arff", + ) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_no_target(monkeypatch, gzip_response): + """Check that we can get a dataset without target.""" + data_id = 61 + target_column = None + expected_observations = 150 + expected_features = 5 + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + data = fetch_openml( + data_id=data_id, + target_column=target_column, + cache=False, + as_frame=False, + parser="liac-arff", + ) + assert data.data.shape == (expected_observations, expected_features) + assert data.target is None + + +@pytest.mark.parametrize("gzip_response", [True, False]) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_missing_values_pandas(monkeypatch, gzip_response, parser): + """check that missing values in categories are compatible with pandas + categorical""" + pytest.importorskip("pandas") + + data_id = 42585 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response) + penguins = fetch_openml( + data_id=data_id, + cache=False, + as_frame=True, + parser=parser, + ) + + cat_dtype = penguins.data.dtypes["sex"] + # there are nans in the categorical + assert penguins.data["sex"].isna().any() + assert_array_equal(cat_dtype.categories, ["FEMALE", "MALE", "_"]) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +@pytest.mark.parametrize( + "dataset_params", + [ + {"data_id": 40675}, + {"data_id": None, "name": "glass2", "version": 1}, + ], +) +def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params): + """Check that we raise a warning when the dataset is inactive.""" + data_id = 40675 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + msg = "Version 1 of dataset glass2 is inactive," + with pytest.warns(UserWarning, match=msg): + glass2 = fetch_openml( + cache=False, as_frame=False, parser="liac-arff", **dataset_params + ) + assert glass2.data.shape == (163, 9) + assert glass2.details["id"] == "40675" + + +@pytest.mark.parametrize("gzip_response", [True, False]) +@pytest.mark.parametrize( + "data_id, params, err_type, err_msg", + [ + (40675, {"name": "glass2"}, ValueError, "No active dataset glass2 found"), + ( + 61, + {"data_id": 61, "target_column": ["sepalwidth", "class"]}, + ValueError, + "Can only handle homogeneous multi-target datasets", + ), + ( + 40945, + {"data_id": 40945, "as_frame": False}, + ValueError, + ( + "STRING attributes are not supported for array representation. 
Try" + " as_frame=True" + ), + ), + ( + 2, + {"data_id": 2, "target_column": "family", "as_frame": True}, + ValueError, + "Target column 'family'", + ), + ( + 2, + {"data_id": 2, "target_column": "family", "as_frame": False}, + ValueError, + "Target column 'family'", + ), + ( + 61, + {"data_id": 61, "target_column": "undefined"}, + KeyError, + "Could not find target_column='undefined'", + ), + ( + 61, + {"data_id": 61, "target_column": ["undefined", "class"]}, + KeyError, + "Could not find target_column='undefined'", + ), + ], +) +@pytest.mark.parametrize("parser", ["liac-arff", "pandas"]) +def test_fetch_openml_error( + monkeypatch, gzip_response, data_id, params, err_type, err_msg, parser +): + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + if params.get("as_frame", True) or parser == "pandas": + pytest.importorskip("pandas") + with pytest.raises(err_type, match=err_msg): + fetch_openml(cache=False, parser=parser, **params) + + +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + ( + {"data_id": -1, "name": None, "version": "version"}, + ValueError, + "The 'version' parameter of fetch_openml must be an int in the range", + ), + ( + {"data_id": -1, "name": "nAmE"}, + ValueError, + "The 'data_id' parameter of fetch_openml must be an int in the range", + ), + ( + {"data_id": -1, "name": "nAmE", "version": "version"}, + ValueError, + "The 'version' parameter of fetch_openml must be an int", + ), + ( + {}, + ValueError, + "Neither name nor data_id are provided. Please provide name or data_id.", + ), + ], +) +def test_fetch_openml_raises_illegal_argument(params, err_type, err_msg): + with pytest.raises(err_type, match=err_msg): + fetch_openml(**params) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_warn_ignore_attribute(monkeypatch, gzip_response): + data_id = 40966 + expected_row_id_msg = "target_column='{}' has flag is_row_identifier." + expected_ignore_msg = "target_column='{}' has flag is_ignore." + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + # single column test + target_col = "MouseID" + msg = expected_row_id_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=target_col, + cache=False, + as_frame=False, + parser="liac-arff", + ) + target_col = "Genotype" + msg = expected_ignore_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=target_col, + cache=False, + as_frame=False, + parser="liac-arff", + ) + # multi column test + target_col = "MouseID" + msg = expected_row_id_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=[target_col, "class"], + cache=False, + as_frame=False, + parser="liac-arff", + ) + target_col = "Genotype" + msg = expected_ignore_msg.format(target_col) + with pytest.warns(UserWarning, match=msg): + fetch_openml( + data_id=data_id, + target_column=[target_col, "class"], + cache=False, + as_frame=False, + parser="liac-arff", + ) + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_dataset_with_openml_error(monkeypatch, gzip_response): + data_id = 1 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + msg = "OpenML registered a problem with the dataset. It might be unusable. 
Error:" + with pytest.warns(UserWarning, match=msg): + fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff") + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_dataset_with_openml_warning(monkeypatch, gzip_response): + data_id = 3 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + msg = "OpenML raised a warning on the dataset. It might be unusable. Warning:" + with pytest.warns(UserWarning, match=msg): + fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff") + + +def test_fetch_openml_overwrite_default_params_read_csv(monkeypatch): + """Check that we can overwrite the default parameters of `read_csv`.""" + pytest.importorskip("pandas") + data_id = 1590 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = { + "data_id": data_id, + "as_frame": True, + "cache": False, + "parser": "pandas", + } + + # By default, the initial spaces are skipped. We checked that setting the parameter + # `skipinitialspace` to False will have an effect. + adult_without_spaces = fetch_openml(**common_params) + adult_with_spaces = fetch_openml( + **common_params, read_csv_kwargs={"skipinitialspace": False} + ) + assert all( + cat.startswith(" ") for cat in adult_with_spaces.frame["class"].cat.categories + ) + assert not any( + cat.startswith(" ") + for cat in adult_without_spaces.frame["class"].cat.categories + ) + + +############################################################################### +# Test cache, retry mechanisms, checksum, etc. + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir): + data_id = 61 + + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id) + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + # first fill the cache + response1 = _open_openml_url(openml_path, cache_directory) + # assert file exists + location = _get_local_path(openml_path, cache_directory) + assert os.path.isfile(location) + # redownload, to utilize cache + response2 = _open_openml_url(openml_path, cache_directory) + assert response1.read() == response2.read() + + +@pytest.mark.parametrize("write_to_disk", [True, False]) +def test_open_openml_url_unlinks_local_path(monkeypatch, tmpdir, write_to_disk): + data_id = 61 + openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id) + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + location = _get_local_path(openml_path, cache_directory) + + def _mock_urlopen(request, *args, **kwargs): + if write_to_disk: + with open(location, "w") as f: + f.write("") + raise ValueError("Invalid request") + + monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen) + + with pytest.raises(ValueError, match="Invalid request"): + _open_openml_url(openml_path, cache_directory) + + assert not os.path.exists(location) + + +def test_retry_with_clean_cache(tmpdir): + data_id = 61 + openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id) + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + location = _get_local_path(openml_path, cache_directory) + os.makedirs(os.path.dirname(location)) + + with open(location, "w") as f: + f.write("") + + @_retry_with_clean_cache(openml_path, cache_directory) + def _load_data(): + # The first call will raise an error since location exists + if os.path.exists(location): + raise Exception("File exist!") + return 1 + 
+ warn_msg = "Invalid cache, redownloading file" + with pytest.warns(RuntimeWarning, match=warn_msg): + result = _load_data() + assert result == 1 + + +def test_retry_with_clean_cache_http_error(tmpdir): + data_id = 61 + openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id) + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + + @_retry_with_clean_cache(openml_path, cache_directory) + def _load_data(): + raise HTTPError( + url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO() + ) + + error_msg = "Simulated mock error" + with pytest.raises(HTTPError, match=error_msg): + _load_data() + + +@pytest.mark.parametrize("gzip_response", [True, False]) +def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir): + def _mock_urlopen_raise(request, *args, **kwargs): + raise ValueError( + "This mechanism intends to test correct cache" + "handling. As such, urlopen should never be " + "accessed. URL: %s" + % request.get_full_url() + ) + + data_id = 61 + cache_directory = str(tmpdir.mkdir("scikit_learn_data")) + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + X_fetched, y_fetched = fetch_openml( + data_id=data_id, + cache=True, + data_home=cache_directory, + return_X_y=True, + as_frame=False, + parser="liac-arff", + ) + + monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen_raise) + + X_cached, y_cached = fetch_openml( + data_id=data_id, + cache=True, + data_home=cache_directory, + return_X_y=True, + as_frame=False, + parser="liac-arff", + ) + np.testing.assert_array_equal(X_fetched, X_cached) + np.testing.assert_array_equal(y_fetched, y_cached) + + +# Known failure of PyPy for OpenML. See the following issue: +# https://github.com/scikit-learn/scikit-learn/issues/18906 +@fails_if_pypy +@pytest.mark.parametrize( + "as_frame, parser", + [ + (True, "liac-arff"), + (False, "liac-arff"), + (True, "pandas"), + (False, "pandas"), + ], +) +def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir, parser): + """Check that the checksum is working as expected.""" + if as_frame or parser == "pandas": + pytest.importorskip("pandas") + + data_id = 2 + _monkey_patch_webbased_functions(monkeypatch, data_id, True) + + # create a temporary modified arff file + original_data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}" + original_data_file_name = "data-v1-dl-1666876.arff.gz" + original_data_path = resources.files(original_data_module) / original_data_file_name + corrupt_copy_path = tmpdir / "test_invalid_checksum.arff" + with original_data_path.open("rb") as orig_file: + orig_gzip = gzip.open(orig_file, "rb") + data = bytearray(orig_gzip.read()) + data[len(data) - 1] = 37 + + with gzip.GzipFile(corrupt_copy_path, "wb") as modified_gzip: + modified_gzip.write(data) + + # Requests are already mocked by monkey_patch_webbased_functions. 
+ # We want to reuse that mock for all requests except file download, + # hence creating a thin mock over the original mock + mocked_openml_url = sklearn.datasets._openml.urlopen + + def swap_file_mock(request, *args, **kwargs): + url = request.get_full_url() + if url.endswith("data/v1/download/1666876"): + with open(corrupt_copy_path, "rb") as f: + corrupted_data = f.read() + return _MockHTTPResponse(BytesIO(corrupted_data), is_gzip=True) + else: + return mocked_openml_url(request) + + monkeypatch.setattr(sklearn.datasets._openml, "urlopen", swap_file_mock) + + # validate failed checksum + with pytest.raises(ValueError) as exc: + sklearn.datasets.fetch_openml( + data_id=data_id, cache=False, as_frame=as_frame, parser=parser + ) + # exception message should have file-path + assert exc.match("1666876") + + +def test_open_openml_url_retry_on_network_error(monkeypatch): + def _mock_urlopen_network_error(request, *args, **kwargs): + raise HTTPError( + url=None, code=404, msg="Simulated network error", hdrs=None, fp=BytesIO() + ) + + monkeypatch.setattr( + sklearn.datasets._openml, "urlopen", _mock_urlopen_network_error + ) + + invalid_openml_url = "invalid-url" + + with pytest.warns( + UserWarning, + match=re.escape( + "A network error occurred while downloading" + f" {_OPENML_PREFIX + invalid_openml_url}. Retrying..." + ), + ) as record: + with pytest.raises(HTTPError, match="Simulated network error"): + _open_openml_url(invalid_openml_url, None, delay=0) + assert len(record) == 3 + + +############################################################################### +# Non-regression tests + + +@pytest.mark.parametrize("gzip_response", [True, False]) +@pytest.mark.parametrize("parser", ("liac-arff", "pandas")) +def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response, parser): + """Check that we can load the "zoo" dataset. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/14340 + """ + if parser == "pandas": + pytest.importorskip("pandas") + data_id = 62 + _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) + + dataset = sklearn.datasets.fetch_openml( + data_id=data_id, cache=False, as_frame=False, parser=parser + ) + assert dataset is not None + # The dataset has 17 features, including 1 ignored (animal), + # so we assert that we don't have the ignored feature in the final Bunch + assert dataset["data"].shape == (101, 16) + assert "animal" not in dataset["feature_names"] + + +def test_fetch_openml_strip_quotes(monkeypatch): + """Check that we strip the single quotes when used as a string delimiter.
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23381 + """ + pd = pytest.importorskip("pandas") + data_id = 40966 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = {"as_frame": True, "cache": False, "data_id": data_id} + mice_pandas = fetch_openml(parser="pandas", **common_params) + mice_liac_arff = fetch_openml(parser="liac-arff", **common_params) + pd.testing.assert_series_equal(mice_pandas.target, mice_liac_arff.target) + assert not mice_pandas.target.str.startswith("'").any() + assert not mice_pandas.target.str.endswith("'").any() + + # similar behaviour should be observed when the column is not the target + mice_pandas = fetch_openml(parser="pandas", target_column="NUMB_N", **common_params) + mice_liac_arff = fetch_openml( + parser="liac-arff", target_column="NUMB_N", **common_params + ) + pd.testing.assert_series_equal( + mice_pandas.frame["class"], mice_liac_arff.frame["class"] + ) + assert not mice_pandas.frame["class"].str.startswith("'").any() + assert not mice_pandas.frame["class"].str.endswith("'").any() + + +def test_fetch_openml_leading_whitespace(monkeypatch): + """Check that we can strip leading whitespace in pandas parser. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/25311 + """ + pd = pytest.importorskip("pandas") + data_id = 1590 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = {"as_frame": True, "cache": False, "data_id": data_id} + adult_pandas = fetch_openml(parser="pandas", **common_params) + adult_liac_arff = fetch_openml(parser="liac-arff", **common_params) + pd.testing.assert_series_equal( + adult_pandas.frame["class"], adult_liac_arff.frame["class"] + ) + + +def test_fetch_openml_quotechar_escapechar(monkeypatch): + """Check that we can handle escapechar and single/double quotechar. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/25478 + """ + pd = pytest.importorskip("pandas") + data_id = 42074 + _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False) + + common_params = {"as_frame": True, "cache": False, "data_id": data_id} + adult_pandas = fetch_openml(parser="pandas", **common_params) + adult_liac_arff = fetch_openml(parser="liac-arff", **common_params) + pd.testing.assert_frame_equal(adult_pandas.frame, adult_liac_arff.frame) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py new file mode 100644 index 0000000000000000000000000000000000000000..fbb9d67015a308e32a7415ff20ca97c23c006835 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py @@ -0,0 +1,71 @@ +"""Test the rcv1 loader, if the data is available, +or if specifically requested via environment variable +(e.g. 
for CI jobs).""" + +from functools import partial + +import numpy as np +import scipy.sparse as sp + +from sklearn.datasets.tests.test_common import check_return_X_y +from sklearn.utils._testing import assert_almost_equal, assert_array_equal + + +def test_fetch_rcv1(fetch_rcv1_fxt, global_random_seed): + data1 = fetch_rcv1_fxt(shuffle=False) + X1, Y1 = data1.data, data1.target + cat_list, s1 = data1.target_names.tolist(), data1.sample_id + + # test sparsity + assert sp.issparse(X1) + assert sp.issparse(Y1) + assert 60915113 == X1.data.size + assert 2606875 == Y1.data.size + + # test shapes + assert (804414, 47236) == X1.shape + assert (804414, 103) == Y1.shape + assert (804414,) == s1.shape + assert 103 == len(cat_list) + + # test descr + assert data1.DESCR.startswith(".. _rcv1_dataset:") + + # test ordering of categories + first_categories = ["C11", "C12", "C13", "C14", "C15", "C151"] + assert_array_equal(first_categories, cat_list[:6]) + + # test number of sample for some categories + some_categories = ("GMIL", "E143", "CCAT") + number_non_zero_in_cat = (5, 1206, 381327) + for num, cat in zip(number_non_zero_in_cat, some_categories): + j = cat_list.index(cat) + assert num == Y1[:, j].data.size + + # test shuffling and subset + data2 = fetch_rcv1_fxt( + shuffle=True, subset="train", random_state=global_random_seed + ) + X2, Y2 = data2.data, data2.target + s2 = data2.sample_id + + # test return_X_y option + fetch_func = partial(fetch_rcv1_fxt, shuffle=False, subset="train") + check_return_X_y(data2, fetch_func) + + # The first 23149 samples are the training samples + assert_array_equal(np.sort(s1[:23149]), np.sort(s2)) + + # test some precise values + some_sample_ids = (2286, 3274, 14042) + for sample_id in some_sample_ids: + idx1 = s1.tolist().index(sample_id) + idx2 = s2.tolist().index(sample_id) + + feature_values_1 = X1[idx1, :].toarray() + feature_values_2 = X2[idx2, :].toarray() + assert_almost_equal(feature_values_1, feature_values_2) + + target_values_1 = Y1[idx1, :].toarray() + target_values_2 = Y2[idx2, :].toarray() + assert_almost_equal(target_values_1, target_values_2) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9cc41d7229c3288e7a399a887266e054adca40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py @@ -0,0 +1,742 @@ +import re +from collections import defaultdict +from functools import partial + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn.datasets import ( + make_biclusters, + make_blobs, + make_checkerboard, + make_circles, + make_classification, + make_friedman1, + make_friedman2, + make_friedman3, + make_hastie_10_2, + make_low_rank_matrix, + make_moons, + make_multilabel_classification, + make_regression, + make_s_curve, + make_sparse_coded_signal, + make_sparse_spd_matrix, + make_sparse_uncorrelated, + make_spd_matrix, + make_swiss_roll, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.validation import assert_all_finite + + +def test_make_classification(): + weights = [0.1, 0.25] + X, y = make_classification( + n_samples=100, + n_features=20, + n_informative=5, + n_redundant=1, + n_repeated=1, + n_classes=3, + 
n_clusters_per_class=1, + hypercube=False, + shift=None, + scale=None, + weights=weights, + random_state=0, + ) + + assert weights == [0.1, 0.25] + assert X.shape == (100, 20), "X shape mismatch" + assert y.shape == (100,), "y shape mismatch" + assert np.unique(y).shape == (3,), "Unexpected number of classes" + assert sum(y == 0) == 10, "Unexpected number of samples in class #0" + assert sum(y == 1) == 25, "Unexpected number of samples in class #1" + assert sum(y == 2) == 65, "Unexpected number of samples in class #2" + + # Test for n_features > 30 + X, y = make_classification( + n_samples=2000, + n_features=31, + n_informative=31, + n_redundant=0, + n_repeated=0, + hypercube=True, + scale=0.5, + random_state=0, + ) + + assert X.shape == (2000, 31), "X shape mismatch" + assert y.shape == (2000,), "y shape mismatch" + assert ( + np.unique(X.view([("", X.dtype)] * X.shape[1])) + .view(X.dtype) + .reshape(-1, X.shape[1]) + .shape[0] + == 2000 + ), "Unexpected number of unique rows" + + +def test_make_classification_informative_features(): + """Test the construction of informative features in make_classification + + Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and + fully-specified `weights`. + """ + # Create very separate clusters; check that vertices are unique and + # correspond to classes + class_sep = 1e6 + make = partial( + make_classification, + class_sep=class_sep, + n_redundant=0, + n_repeated=0, + flip_y=0, + shift=0, + scale=1, + shuffle=False, + ) + + for n_informative, weights, n_clusters_per_class in [ + (2, [1], 1), + (2, [1 / 3] * 3, 1), + (2, [1 / 4] * 4, 1), + (2, [1 / 2] * 2, 2), + (2, [3 / 4, 1 / 4], 2), + (10, [1 / 3] * 3, 10), + (int(64), [1], 1), + ]: + n_classes = len(weights) + n_clusters = n_classes * n_clusters_per_class + n_samples = n_clusters * 50 + + for hypercube in (False, True): + X, y = make( + n_samples=n_samples, + n_classes=n_classes, + weights=weights, + n_features=n_informative, + n_informative=n_informative, + n_clusters_per_class=n_clusters_per_class, + hypercube=hypercube, + random_state=0, + ) + + assert X.shape == (n_samples, n_informative) + assert y.shape == (n_samples,) + + # Cluster by sign, viewed as strings to allow uniquing + signs = np.sign(X) + signs = signs.view(dtype="|S{0}".format(signs.strides[0])).ravel() + unique_signs, cluster_index = np.unique(signs, return_inverse=True) + + assert ( + len(unique_signs) == n_clusters + ), "Wrong number of clusters, or not in distinct quadrants" + + clusters_by_class = defaultdict(set) + for cluster, cls in zip(cluster_index, y): + clusters_by_class[cls].add(cluster) + for clusters in clusters_by_class.values(): + assert ( + len(clusters) == n_clusters_per_class + ), "Wrong number of clusters per class" + assert len(clusters_by_class) == n_classes, "Wrong number of classes" + + assert_array_almost_equal( + np.bincount(y) / len(y) // weights, + [1] * n_classes, + err_msg="Wrong number of samples per class", + ) + + # Ensure on vertices of hypercube + for cluster in range(len(unique_signs)): + centroid = X[cluster_index == cluster].mean(axis=0) + if hypercube: + assert_array_almost_equal( + np.abs(centroid) / class_sep, + np.ones(n_informative), + decimal=5, + err_msg="Clusters are not centered on hypercube vertices", + ) + else: + with pytest.raises(AssertionError): + assert_array_almost_equal( + np.abs(centroid) / class_sep, + np.ones(n_informative), + decimal=5, + err_msg=( + "Clusters should not be centered on hypercube vertices" + ), + ) + + with pytest.raises(ValueError): + 
make(n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1) + with pytest.raises(ValueError): + make(n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2) + + +@pytest.mark.parametrize( + "weights, err_type, err_msg", + [ + ([], ValueError, "Weights specified but incompatible with number of classes."), + ( + [0.25, 0.75, 0.1], + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ( + np.array([]), + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ( + np.array([0.25, 0.75, 0.1]), + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ( + np.random.random(3), + ValueError, + "Weights specified but incompatible with number of classes.", + ), + ], +) +def test_make_classification_weights_type(weights, err_type, err_msg): + with pytest.raises(err_type, match=err_msg): + make_classification(weights=weights) + + +@pytest.mark.parametrize("kwargs", [{}, {"n_classes": 3, "n_informative": 3}]) +def test_make_classification_weights_array_or_list_ok(kwargs): + X1, y1 = make_classification(weights=[0.1, 0.9], random_state=0, **kwargs) + X2, y2 = make_classification(weights=np.array([0.1, 0.9]), random_state=0, **kwargs) + assert_almost_equal(X1, X2) + assert_almost_equal(y1, y2) + + +def test_make_multilabel_classification_return_sequences(): + for allow_unlabeled, min_length in zip((True, False), (0, 1)): + X, Y = make_multilabel_classification( + n_samples=100, + n_features=20, + n_classes=3, + random_state=0, + return_indicator=False, + allow_unlabeled=allow_unlabeled, + ) + assert X.shape == (100, 20), "X shape mismatch" + if not allow_unlabeled: + assert max([max(y) for y in Y]) == 2 + assert min([len(y) for y in Y]) == min_length + assert max([len(y) for y in Y]) <= 3 + + +def test_make_multilabel_classification_return_indicator(): + for allow_unlabeled, min_length in zip((True, False), (0, 1)): + X, Y = make_multilabel_classification( + n_samples=25, + n_features=20, + n_classes=3, + random_state=0, + allow_unlabeled=allow_unlabeled, + ) + assert X.shape == (25, 20), "X shape mismatch" + assert Y.shape == (25, 3), "Y shape mismatch" + assert np.all(np.sum(Y, axis=0) > min_length) + + # Also test return_distributions and return_indicator with True + X2, Y2, p_c, p_w_c = make_multilabel_classification( + n_samples=25, + n_features=20, + n_classes=3, + random_state=0, + allow_unlabeled=allow_unlabeled, + return_distributions=True, + ) + + assert_array_almost_equal(X, X2) + assert_array_equal(Y, Y2) + assert p_c.shape == (3,) + assert_almost_equal(p_c.sum(), 1) + assert p_w_c.shape == (20, 3) + assert_almost_equal(p_w_c.sum(axis=0), [1] * 3) + + +def test_make_multilabel_classification_return_indicator_sparse(): + for allow_unlabeled, min_length in zip((True, False), (0, 1)): + X, Y = make_multilabel_classification( + n_samples=25, + n_features=20, + n_classes=3, + random_state=0, + return_indicator="sparse", + allow_unlabeled=allow_unlabeled, + ) + assert X.shape == (25, 20), "X shape mismatch" + assert Y.shape == (25, 3), "Y shape mismatch" + assert sp.issparse(Y) + + +def test_make_hastie_10_2(): + X, y = make_hastie_10_2(n_samples=100, random_state=0) + assert X.shape == (100, 10), "X shape mismatch" + assert y.shape == (100,), "y shape mismatch" + assert np.unique(y).shape == (2,), "Unexpected number of classes" + + +def test_make_regression(): + X, y, c = make_regression( + n_samples=100, + n_features=10, + n_informative=3, + effective_rank=5, + coef=True, + bias=0.0, + 
noise=1.0, + random_state=0, + ) + + assert X.shape == (100, 10), "X shape mismatch" + assert y.shape == (100,), "y shape mismatch" + assert c.shape == (10,), "coef shape mismatch" + assert sum(c != 0.0) == 3, "Unexpected number of informative features" + + # Test that y ~= np.dot(X, c) + bias + N(0, 1.0). + assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) + + # Test with small number of features. + X, y = make_regression(n_samples=100, n_features=1) # n_informative=3 + assert X.shape == (100, 1) + + +def test_make_regression_multitarget(): + X, y, c = make_regression( + n_samples=100, + n_features=10, + n_informative=3, + n_targets=3, + coef=True, + noise=1.0, + random_state=0, + ) + + assert X.shape == (100, 10), "X shape mismatch" + assert y.shape == (100, 3), "y shape mismatch" + assert c.shape == (10, 3), "coef shape mismatch" + assert_array_equal(sum(c != 0.0), 3, "Unexpected number of informative features") + + # Test that y ~= np.dot(X, c) + bias + N(0, 1.0) + assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1) + + +def test_make_blobs(): + cluster_stds = np.array([0.05, 0.2, 0.4]) + cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) + X, y = make_blobs( + random_state=0, + n_samples=50, + n_features=2, + centers=cluster_centers, + cluster_std=cluster_stds, + ) + + assert X.shape == (50, 2), "X shape mismatch" + assert y.shape == (50,), "y shape mismatch" + assert np.unique(y).shape == (3,), "Unexpected number of blobs" + for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)): + assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std") + + +def test_make_blobs_n_samples_list(): + n_samples = [50, 30, 20] + X, y = make_blobs(n_samples=n_samples, n_features=2, random_state=0) + + assert X.shape == (sum(n_samples), 2), "X shape mismatch" + assert all( + np.bincount(y, minlength=len(n_samples)) == n_samples + ), "Incorrect number of samples per blob" + + +def test_make_blobs_n_samples_list_with_centers(): + n_samples = [20, 20, 20] + centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) + cluster_stds = np.array([0.05, 0.2, 0.4]) + X, y = make_blobs( + n_samples=n_samples, centers=centers, cluster_std=cluster_stds, random_state=0 + ) + + assert X.shape == (sum(n_samples), 2), "X shape mismatch" + assert all( + np.bincount(y, minlength=len(n_samples)) == n_samples + ), "Incorrect number of samples per blob" + for i, (ctr, std) in enumerate(zip(centers, cluster_stds)): + assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std") + + +@pytest.mark.parametrize( + "n_samples", [[5, 3, 0], np.array([5, 3, 0]), tuple([5, 3, 0])] +) +def test_make_blobs_n_samples_centers_none(n_samples): + centers = None + X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=0) + + assert X.shape == (sum(n_samples), 2), "X shape mismatch" + assert all( + np.bincount(y, minlength=len(n_samples)) == n_samples + ), "Incorrect number of samples per blob" + + +def test_make_blobs_return_centers(): + n_samples = [10, 20] + n_features = 3 + X, y, centers = make_blobs( + n_samples=n_samples, n_features=n_features, return_centers=True, random_state=0 + ) + + assert centers.shape == (len(n_samples), n_features) + + +def test_make_blobs_error(): + n_samples = [20, 20, 20] + centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) + cluster_stds = np.array([0.05, 0.2, 0.4]) + wrong_centers_msg = re.escape( + "Length of `n_samples` not consistent with number of centers. 
" + f"Got n_samples = {n_samples} and centers = {centers[:-1]}" + ) + with pytest.raises(ValueError, match=wrong_centers_msg): + make_blobs(n_samples, centers=centers[:-1]) + wrong_std_msg = re.escape( + "Length of `clusters_std` not consistent with number of centers. " + f"Got centers = {centers} and cluster_std = {cluster_stds[:-1]}" + ) + with pytest.raises(ValueError, match=wrong_std_msg): + make_blobs(n_samples, centers=centers, cluster_std=cluster_stds[:-1]) + wrong_type_msg = "Parameter `centers` must be array-like. Got {!r} instead".format( + 3 + ) + with pytest.raises(ValueError, match=wrong_type_msg): + make_blobs(n_samples, centers=3) + + +def test_make_friedman1(): + X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0, random_state=0) + + assert X.shape == (5, 10), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + assert_array_almost_equal( + y, + 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + + 20 * (X[:, 2] - 0.5) ** 2 + + 10 * X[:, 3] + + 5 * X[:, 4], + ) + + +def test_make_friedman2(): + X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0) + + assert X.shape == (5, 4), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + assert_array_almost_equal( + y, (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + ) + + +def test_make_friedman3(): + X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0) + + assert X.shape == (5, 4), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + assert_array_almost_equal( + y, np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) + ) + + +def test_make_low_rank_matrix(): + X = make_low_rank_matrix( + n_samples=50, + n_features=25, + effective_rank=5, + tail_strength=0.01, + random_state=0, + ) + + assert X.shape == (50, 25), "X shape mismatch" + + from numpy.linalg import svd + + u, s, v = svd(X) + assert sum(s) - 5 < 0.1, "X rank is not approximately 5" + + +def test_make_sparse_coded_signal(): + Y, D, X = make_sparse_coded_signal( + n_samples=5, + n_components=8, + n_features=10, + n_nonzero_coefs=3, + random_state=0, + ) + assert Y.shape == (5, 10), "Y shape mismatch" + assert D.shape == (8, 10), "D shape mismatch" + assert X.shape == (5, 8), "X shape mismatch" + for row in X: + assert len(np.flatnonzero(row)) == 3, "Non-zero coefs mismatch" + assert_allclose(Y, X @ D) + assert_allclose(np.sqrt((D**2).sum(axis=1)), np.ones(D.shape[0])) + + +# TODO(1.5): remove +@ignore_warnings(category=FutureWarning) +def test_make_sparse_coded_signal_transposed(): + Y, D, X = make_sparse_coded_signal( + n_samples=5, + n_components=8, + n_features=10, + n_nonzero_coefs=3, + random_state=0, + data_transposed=True, + ) + assert Y.shape == (10, 5), "Y shape mismatch" + assert D.shape == (10, 8), "D shape mismatch" + assert X.shape == (8, 5), "X shape mismatch" + for col in X.T: + assert len(np.flatnonzero(col)) == 3, "Non-zero coefs mismatch" + assert_allclose(Y, D @ X) + assert_allclose(np.sqrt((D**2).sum(axis=0)), np.ones(D.shape[1])) + + +# TODO(1.5): remove +def test_make_sparse_code_signal_deprecation_warning(): + """Check the message for future deprecation.""" + warn_msg = "data_transposed was deprecated in version 1.3" + with pytest.warns(FutureWarning, match=warn_msg): + make_sparse_coded_signal( + n_samples=1, + n_components=1, + n_features=1, + n_nonzero_coefs=1, + random_state=0, + data_transposed=True, + ) + + +def test_make_sparse_uncorrelated(): + X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0) + + assert X.shape == 
(5, 10), "X shape mismatch" + assert y.shape == (5,), "y shape mismatch" + + +def test_make_spd_matrix(): + X = make_spd_matrix(n_dim=5, random_state=0) + + assert X.shape == (5, 5), "X shape mismatch" + assert_array_almost_equal(X, X.T) + + from numpy.linalg import eig + + eigenvalues, _ = eig(X) + assert np.all(eigenvalues > 0), "X is not positive-definite" + + +@pytest.mark.parametrize("norm_diag", [True, False]) +@pytest.mark.parametrize( + "sparse_format", [None, "bsr", "coo", "csc", "csr", "dia", "dok", "lil"] +) +def test_make_sparse_spd_matrix(norm_diag, sparse_format, global_random_seed): + n_dim = 5 + X = make_sparse_spd_matrix( + n_dim=n_dim, + norm_diag=norm_diag, + sparse_format=sparse_format, + random_state=global_random_seed, + ) + + assert X.shape == (n_dim, n_dim), "X shape mismatch" + if sparse_format is None: + assert not sp.issparse(X) + assert_allclose(X, X.T) + Xarr = X + else: + assert sp.issparse(X) and X.format == sparse_format + assert_allclose_dense_sparse(X, X.T) + Xarr = X.toarray() + + from numpy.linalg import eig + + # Do not use scipy.sparse.linalg.eigs because it cannot find all eigenvalues + eigenvalues, _ = eig(Xarr) + assert np.all(eigenvalues > 0), "X is not positive-definite" + + if norm_diag: + # Check that leading diagonal elements are 1 + assert_array_almost_equal(Xarr.diagonal(), np.ones(n_dim)) + + +# TODO(1.6): remove +def test_make_sparse_spd_matrix_deprecation_warning(): + """Check the message for future deprecation.""" + warn_msg = "dim was deprecated in version 1.4" + with pytest.warns(FutureWarning, match=warn_msg): + make_sparse_spd_matrix( + dim=1, + ) + + error_msg = "`dim` and `n_dim` cannot be both specified" + with pytest.raises(ValueError, match=error_msg): + make_sparse_spd_matrix( + dim=1, + n_dim=1, + ) + + X = make_sparse_spd_matrix() + assert X.shape[1] == 1 + + +@pytest.mark.parametrize("hole", [False, True]) +def test_make_swiss_roll(hole): + X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0, hole=hole) + + assert X.shape == (5, 3) + assert t.shape == (5,) + assert_array_almost_equal(X[:, 0], t * np.cos(t)) + assert_array_almost_equal(X[:, 2], t * np.sin(t)) + + +def test_make_s_curve(): + X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0) + + assert X.shape == (5, 3), "X shape mismatch" + assert t.shape == (5,), "t shape mismatch" + assert_array_almost_equal(X[:, 0], np.sin(t)) + assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1)) + + +def test_make_biclusters(): + X, rows, cols = make_biclusters( + shape=(100, 100), n_clusters=4, shuffle=True, random_state=0 + ) + assert X.shape == (100, 100), "X shape mismatch" + assert rows.shape == (4, 100), "rows shape mismatch" + assert cols.shape == ( + 4, + 100, + ), "columns shape mismatch" + assert_all_finite(X) + assert_all_finite(rows) + assert_all_finite(cols) + + X2, _, _ = make_biclusters( + shape=(100, 100), n_clusters=4, shuffle=True, random_state=0 + ) + assert_array_almost_equal(X, X2) + + +def test_make_checkerboard(): + X, rows, cols = make_checkerboard( + shape=(100, 100), n_clusters=(20, 5), shuffle=True, random_state=0 + ) + assert X.shape == (100, 100), "X shape mismatch" + assert rows.shape == (100, 100), "rows shape mismatch" + assert cols.shape == ( + 100, + 100, + ), "columns shape mismatch" + + X, rows, cols = make_checkerboard( + shape=(100, 100), n_clusters=2, shuffle=True, random_state=0 + ) + assert_all_finite(X) + assert_all_finite(rows) + assert_all_finite(cols) + + X1, _, _ = make_checkerboard( + shape=(100, 100), 
n_clusters=2, shuffle=True, random_state=0 + ) + X2, _, _ = make_checkerboard( + shape=(100, 100), n_clusters=2, shuffle=True, random_state=0 + ) + assert_array_almost_equal(X1, X2) + + +def test_make_moons(): + X, y = make_moons(3, shuffle=False) + for x, label in zip(X, y): + center = [0.0, 0.0] if label == 0 else [1.0, 0.5] + dist_sqr = ((x - center) ** 2).sum() + assert_almost_equal( + dist_sqr, 1.0, err_msg="Point is not on expected unit circle" + ) + + +def test_make_moons_unbalanced(): + X, y = make_moons(n_samples=(7, 5)) + assert ( + np.sum(y == 0) == 7 and np.sum(y == 1) == 5 + ), "Number of samples in a moon is wrong" + assert X.shape == (12, 2), "X shape mismatch" + assert y.shape == (12,), "y shape mismatch" + + with pytest.raises( + ValueError, + match=r"`n_samples` can be either an int " r"or a two-element tuple.", + ): + make_moons(n_samples=(10,)) + + +def test_make_circles(): + factor = 0.3 + + for n_samples, n_outer, n_inner in [(7, 3, 4), (8, 4, 4)]: + # Testing odd and even case, because in the past make_circles always + # created an even number of samples. + X, y = make_circles(n_samples, shuffle=False, noise=None, factor=factor) + assert X.shape == (n_samples, 2), "X shape mismatch" + assert y.shape == (n_samples,), "y shape mismatch" + center = [0.0, 0.0] + for x, label in zip(X, y): + dist_sqr = ((x - center) ** 2).sum() + dist_exp = 1.0 if label == 0 else factor**2 + assert_almost_equal( + dist_sqr, dist_exp, err_msg="Point is not on expected circle" + ) + + assert X[y == 0].shape == ( + n_outer, + 2, + ), "Samples not correctly distributed across circles." + assert X[y == 1].shape == ( + n_inner, + 2, + ), "Samples not correctly distributed across circles." + + +def test_make_circles_unbalanced(): + X, y = make_circles(n_samples=(2, 8)) + + assert np.sum(y == 0) == 2, "Number of samples in inner circle is wrong" + assert np.sum(y == 1) == 8, "Number of samples in outer circle is wrong" + assert X.shape == (10, 2), "X shape mismatch" + assert y.shape == (10,), "y shape mismatch" + + with pytest.raises( + ValueError, + match="When a tuple, n_samples must have exactly two elements.", + ): + make_circles(n_samples=(10,)) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py new file mode 100644 index 0000000000000000000000000000000000000000..5c641dd79cc6396cca2201adc499d144a1a4df62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py @@ -0,0 +1,616 @@ +import gzip +import os +import shutil +from bz2 import BZ2File +from importlib import resources +from io import BytesIO +from tempfile import NamedTemporaryFile + +import numpy as np +import pytest +import scipy.sparse as sp + +import sklearn +from sklearn.datasets import dump_svmlight_file, load_svmlight_file, load_svmlight_files +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + create_memmap_backed_data, + fails_if_pypy, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +TEST_DATA_MODULE = "sklearn.datasets.tests.data" +datafile = "svmlight_classification.txt" +multifile = "svmlight_multilabel.txt" +invalidfile = "svmlight_invalid.txt" +invalidfile2 = "svmlight_invalid_order.txt" + +pytestmark = fails_if_pypy + + +def _svmlight_local_test_file_path(filename): + return resources.files(TEST_DATA_MODULE) / filename + + +def
_load_svmlight_local_test_file(filename, **kwargs): + """ + Helper to load resource `filename` with `importlib.resources` + """ + data_path = _svmlight_local_test_file_path(filename) + with data_path.open("rb") as f: + return load_svmlight_file(f, **kwargs) + + +def test_load_svmlight_file(): + X, y = _load_svmlight_local_test_file(datafile) + + # test X's shape + assert X.indptr.shape[0] == 7 + assert X.shape[0] == 6 + assert X.shape[1] == 21 + assert y.shape[0] == 6 + + # test X's non-zero values + for i, j, val in ( + (0, 2, 2.5), + (0, 10, -5.2), + (0, 15, 1.5), + (1, 5, 1.0), + (1, 12, -3), + (2, 20, 27), + ): + assert X[i, j] == val + + # tests X's zero values + assert X[0, 3] == 0 + assert X[0, 5] == 0 + assert X[1, 8] == 0 + assert X[1, 16] == 0 + assert X[2, 18] == 0 + + # test can change X's values + X[0, 2] *= 2 + assert X[0, 2] == 5 + + # test y + assert_array_equal(y, [1, 2, 3, 4, 1, 2]) + + +def test_load_svmlight_file_fd(): + # test loading from file descriptor + + # GH20081: testing equality between path-based and + # fd-based load_svmlight_file + + data_path = resources.files(TEST_DATA_MODULE) / datafile + data_path = str(data_path) + X1, y1 = load_svmlight_file(data_path) + + fd = os.open(data_path, os.O_RDONLY) + try: + X2, y2 = load_svmlight_file(fd) + assert_array_almost_equal(X1.data, X2.data) + assert_array_almost_equal(y1, y2) + finally: + os.close(fd) + + +def test_load_svmlight_pathlib(): + # test loading from file descriptor + data_path = _svmlight_local_test_file_path(datafile) + X1, y1 = load_svmlight_file(str(data_path)) + X2, y2 = load_svmlight_file(data_path) + + assert_allclose(X1.data, X2.data) + assert_allclose(y1, y2) + + +def test_load_svmlight_file_multilabel(): + X, y = _load_svmlight_local_test_file(multifile, multilabel=True) + assert y == [(0, 1), (2,), (), (1, 2)] + + +def test_load_svmlight_files(): + data_path = _svmlight_local_test_file_path(datafile) + X_train, y_train, X_test, y_test = load_svmlight_files( + [str(data_path)] * 2, dtype=np.float32 + ) + assert_array_equal(X_train.toarray(), X_test.toarray()) + assert_array_almost_equal(y_train, y_test) + assert X_train.dtype == np.float32 + assert X_test.dtype == np.float32 + + X1, y1, X2, y2, X3, y3 = load_svmlight_files([str(data_path)] * 3, dtype=np.float64) + assert X1.dtype == X2.dtype + assert X2.dtype == X3.dtype + assert X3.dtype == np.float64 + + +def test_load_svmlight_file_n_features(): + X, y = _load_svmlight_local_test_file(datafile, n_features=22) + + # test X'shape + assert X.indptr.shape[0] == 7 + assert X.shape[0] == 6 + assert X.shape[1] == 22 + + # test X's non-zero values + for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (1, 5, 1.0), (1, 12, -3)): + assert X[i, j] == val + + # 21 features in file + with pytest.raises(ValueError): + _load_svmlight_local_test_file(datafile, n_features=20) + + +def test_load_compressed(): + X, y = _load_svmlight_local_test_file(datafile) + + with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp: + tmp.close() # necessary under windows + with _svmlight_local_test_file_path(datafile).open("rb") as f: + with gzip.open(tmp.name, "wb") as fh_out: + shutil.copyfileobj(f, fh_out) + Xgz, ygz = load_svmlight_file(tmp.name) + # because we "close" it manually and write to it, + # we need to remove it manually. 
+ os.remove(tmp.name) + assert_array_almost_equal(X.toarray(), Xgz.toarray()) + assert_array_almost_equal(y, ygz) + + with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp: + tmp.close() # necessary under windows + with _svmlight_local_test_file_path(datafile).open("rb") as f: + with BZ2File(tmp.name, "wb") as fh_out: + shutil.copyfileobj(f, fh_out) + Xbz, ybz = load_svmlight_file(tmp.name) + # because we "close" it manually and write to it, + # we need to remove it manually. + os.remove(tmp.name) + assert_array_almost_equal(X.toarray(), Xbz.toarray()) + assert_array_almost_equal(y, ybz) + + +def test_load_invalid_file(): + with pytest.raises(ValueError): + _load_svmlight_local_test_file(invalidfile) + + +def test_load_invalid_order_file(): + with pytest.raises(ValueError): + _load_svmlight_local_test_file(invalidfile2) + + +def test_load_zero_based(): + f = BytesIO(b"-1 4:1.\n1 0:1\n") + with pytest.raises(ValueError): + load_svmlight_file(f, zero_based=False) + + +def test_load_zero_based_auto(): + data1 = b"-1 1:1 2:2 3:3\n" + data2 = b"-1 0:0 1:1\n" + + f1 = BytesIO(data1) + X, y = load_svmlight_file(f1, zero_based="auto") + assert X.shape == (1, 3) + + f1 = BytesIO(data1) + f2 = BytesIO(data2) + X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto") + assert X1.shape == (1, 4) + assert X2.shape == (1, 4) + + +def test_load_with_qid(): + # load svmfile with qid attribute + data = b""" + 3 qid:1 1:0.53 2:0.12 + 2 qid:1 1:0.13 2:0.1 + 7 qid:2 1:0.87 2:0.12""" + X, y = load_svmlight_file(BytesIO(data), query_id=False) + assert_array_equal(y, [3, 2, 7]) + assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]]) + res1 = load_svmlight_files([BytesIO(data)], query_id=True) + res2 = load_svmlight_file(BytesIO(data), query_id=True) + for X, y, qid in (res1, res2): + assert_array_equal(y, [3, 2, 7]) + assert_array_equal(qid, [1, 1, 2]) + assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]]) + + +@pytest.mark.skip( + "testing the overflow of 32 bit sparse indexing requires a large amount of memory" +) +def test_load_large_qid(): + """ + load large libsvm / svmlight file with qid attribute. 
Tests 64-bit query ID + """ + data = b"\n".join( + ( + "3 qid:{0} 1:0.53 2:0.12\n2 qid:{0} 1:0.13 2:0.1".format(i).encode() + for i in range(1, 40 * 1000 * 1000) + ) + ) + X, y, qid = load_svmlight_file(BytesIO(data), query_id=True) + assert_array_equal(y[-4:], [3, 2, 3, 2]) + assert_array_equal(np.unique(qid), np.arange(1, 40 * 1000 * 1000)) + + +def test_load_invalid_file2(): + with pytest.raises(ValueError): + data_path = _svmlight_local_test_file_path(datafile) + invalid_path = _svmlight_local_test_file_path(invalidfile) + load_svmlight_files([str(data_path), str(invalid_path), str(data_path)]) + + +def test_not_a_filename(): + # in python 3 integers are valid file opening arguments (taken as unix + # file descriptors) + with pytest.raises(TypeError): + load_svmlight_file(0.42) + + +def test_invalid_filename(): + with pytest.raises(OSError): + load_svmlight_file("trou pic nic douille") + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dump(csr_container): + X_sparse, y_dense = _load_svmlight_local_test_file(datafile) + X_dense = X_sparse.toarray() + y_sparse = csr_container(np.atleast_2d(y_dense)) + + # slicing a csr_matrix can unsort its .indices, so test that we sort + # those correctly + X_sliced = X_sparse[np.arange(X_sparse.shape[0])] + y_sliced = y_sparse[np.arange(y_sparse.shape[0])] + + for X in (X_sparse, X_dense, X_sliced): + for y in (y_sparse, y_dense, y_sliced): + for zero_based in (True, False): + for dtype in [np.float32, np.float64, np.int32, np.int64]: + f = BytesIO() + # we need to pass a comment to get the version info in; + # LibSVM doesn't grok comments so they're not put in by + # default anymore. + + if sp.issparse(y) and y.shape[0] == 1: + # make sure y's shape is: (n_samples, n_labels) + # when it is sparse + y = y.T + + # Note: with dtype=np.int32 we are performing unsafe casts, + # where X.astype(dtype) overflows. The result is + # then platform dependent and X_dense.astype(dtype) may be + # different from X_sparse.astype(dtype).asarray(). 
+ X_input = X.astype(dtype) + + dump_svmlight_file( + X_input, y, f, comment="test", zero_based=zero_based + ) + f.seek(0) + + comment = f.readline() + comment = str(comment, "utf-8") + + assert "scikit-learn %s" % sklearn.__version__ in comment + + comment = f.readline() + comment = str(comment, "utf-8") + + assert ["one", "zero"][zero_based] + "-based" in comment + + X2, y2 = load_svmlight_file(f, dtype=dtype, zero_based=zero_based) + assert X2.dtype == dtype + assert_array_equal(X2.sorted_indices().indices, X2.indices) + + X2_dense = X2.toarray() + if sp.issparse(X_input): + X_input_dense = X_input.toarray() + else: + X_input_dense = X_input + + if dtype == np.float32: + # allow a rounding error at the last decimal place + assert_array_almost_equal(X_input_dense, X2_dense, 4) + assert_array_almost_equal( + y_dense.astype(dtype, copy=False), y2, 4 + ) + else: + # allow a rounding error at the last decimal place + assert_array_almost_equal(X_input_dense, X2_dense, 15) + assert_array_almost_equal( + y_dense.astype(dtype, copy=False), y2, 15 + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_dump_multilabel(csr_container): + X = [[1, 0, 3, 0, 5], [0, 0, 0, 0, 0], [0, 5, 0, 1, 0]] + y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]] + y_sparse = csr_container(y_dense) + for y in [y_dense, y_sparse]: + f = BytesIO() + dump_svmlight_file(X, y, f, multilabel=True) + f.seek(0) + # make sure it dumps multilabel correctly + assert f.readline() == b"1 0:1 2:3 4:5\n" + assert f.readline() == b"0,2 \n" + assert f.readline() == b"0,1 1:5 3:1\n" + + +def test_dump_concise(): + one = 1 + two = 2.1 + three = 3.01 + exact = 1.000000000000001 + # loses the last decimal place + almost = 1.0000000000000001 + X = [ + [one, two, three, exact, almost], + [1e9, 2e18, 3e27, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ] + y = [one, two, three, exact, almost] + f = BytesIO() + dump_svmlight_file(X, y, f) + f.seek(0) + # make sure it's using the most concise format possible + assert f.readline() == b"1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n" + assert f.readline() == b"2.1 0:1000000000 1:2e+18 2:3e+27\n" + assert f.readline() == b"3.01 \n" + assert f.readline() == b"1.000000000000001 \n" + assert f.readline() == b"1 \n" + f.seek(0) + # make sure it's correct too :) + X2, y2 = load_svmlight_file(f) + assert_array_almost_equal(X, X2.toarray()) + assert_array_almost_equal(y, y2) + + +def test_dump_comment(): + X, y = _load_svmlight_local_test_file(datafile) + X = X.toarray() + + f = BytesIO() + ascii_comment = "This is a comment\nspanning multiple lines." 
+ dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False) + f.seek(0) + + X2, y2 = load_svmlight_file(f, zero_based=False) + assert_array_almost_equal(X, X2.toarray()) + assert_array_almost_equal(y, y2) + + # XXX we have to update this to support Python 3.x + utf8_comment = b"It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc" + f = BytesIO() + with pytest.raises(UnicodeDecodeError): + dump_svmlight_file(X, y, f, comment=utf8_comment) + + unicode_comment = utf8_comment.decode("utf-8") + f = BytesIO() + dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False) + f.seek(0) + + X2, y2 = load_svmlight_file(f, zero_based=False) + assert_array_almost_equal(X, X2.toarray()) + assert_array_almost_equal(y, y2) + + f = BytesIO() + with pytest.raises(ValueError): + dump_svmlight_file(X, y, f, comment="I've got a \0.") + + +def test_dump_invalid(): + X, y = _load_svmlight_local_test_file(datafile) + + f = BytesIO() + y2d = [y] + with pytest.raises(ValueError): + dump_svmlight_file(X, y2d, f) + + f = BytesIO() + with pytest.raises(ValueError): + dump_svmlight_file(X, y[:-1], f) + + +def test_dump_query_id(): + # test dumping a file with query_id + X, y = _load_svmlight_local_test_file(datafile) + X = X.toarray() + query_id = np.arange(X.shape[0]) // 2 + f = BytesIO() + dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True) + + f.seek(0) + X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True) + assert_array_almost_equal(X, X1.toarray()) + assert_array_almost_equal(y, y1) + assert_array_almost_equal(query_id, query_id1) + + +def test_load_with_long_qid(): + # load svmfile with longint qid attribute + data = b""" + 1 qid:0 0:1 1:2 2:3 + 0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985 + 0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985 + 3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""" + X, y, qid = load_svmlight_file(BytesIO(data), query_id=True) + + true_X = [ + [1, 2, 3], + [1440446648, 72048431380967004, 236784985], + [1440446648, 72048431380967004, 236784985], + [1440446648, 72048431380967004, 236784985], + ] + + true_y = [1, 0, 0, 3] + trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807] + assert_array_equal(y, true_y) + assert_array_equal(X.toarray(), true_X) + assert_array_equal(qid, trueQID) + + f = BytesIO() + dump_svmlight_file(X, y, f, query_id=qid, zero_based=True) + f.seek(0) + X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True) + assert_array_equal(y, true_y) + assert_array_equal(X.toarray(), true_X) + assert_array_equal(qid, trueQID) + + f.seek(0) + X, y = load_svmlight_file(f, query_id=False, zero_based=True) + assert_array_equal(y, true_y) + assert_array_equal(X.toarray(), true_X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_load_zeros(csr_container): + f = BytesIO() + true_X = csr_container(np.zeros(shape=(3, 4))) + true_y = np.array([0, 1, 0]) + dump_svmlight_file(true_X, true_y, f) + + for zero_based in ["auto", True, False]: + f.seek(0) + X, y = load_svmlight_file(f, n_features=4, zero_based=zero_based) + assert_array_almost_equal(y, true_y) + assert_array_almost_equal(X.toarray(), true_X.toarray()) + + +@pytest.mark.parametrize("sparsity", [0, 0.1, 0.5, 0.99, 1]) +@pytest.mark.parametrize("n_samples", [13, 101]) +@pytest.mark.parametrize("n_features", [2, 7, 41]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_load_with_offsets(sparsity, n_samples, n_features, csr_container): + rng = 
np.random.RandomState(0) + X = rng.uniform(low=0.0, high=1.0, size=(n_samples, n_features)) + if sparsity: + X[X < sparsity] = 0.0 + X = csr_container(X) + y = rng.randint(low=0, high=2, size=n_samples) + + f = BytesIO() + dump_svmlight_file(X, y, f) + f.seek(0) + + size = len(f.getvalue()) + + # put some marks that are likely to happen anywhere in a row + mark_0 = 0 + mark_1 = size // 3 + length_0 = mark_1 - mark_0 + mark_2 = 4 * size // 5 + length_1 = mark_2 - mark_1 + + # load the original sparse matrix into 3 independent CSR matrices + X_0, y_0 = load_svmlight_file( + f, n_features=n_features, offset=mark_0, length=length_0 + ) + X_1, y_1 = load_svmlight_file( + f, n_features=n_features, offset=mark_1, length=length_1 + ) + X_2, y_2 = load_svmlight_file(f, n_features=n_features, offset=mark_2) + + y_concat = np.concatenate([y_0, y_1, y_2]) + X_concat = sp.vstack([X_0, X_1, X_2]) + assert_array_almost_equal(y, y_concat) + assert_array_almost_equal(X.toarray(), X_concat.toarray()) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_load_offset_exhaustive_splits(csr_container): + rng = np.random.RandomState(0) + X = np.array( + [ + [0, 0, 0, 0, 0, 0], + [1, 2, 3, 4, 0, 6], + [1, 2, 3, 4, 0, 6], + [0, 0, 0, 0, 0, 0], + [1, 0, 3, 0, 0, 0], + [0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 0], + ] + ) + X = csr_container(X) + n_samples, n_features = X.shape + y = rng.randint(low=0, high=2, size=n_samples) + query_id = np.arange(n_samples) // 2 + + f = BytesIO() + dump_svmlight_file(X, y, f, query_id=query_id) + f.seek(0) + + size = len(f.getvalue()) + + # load the same data in 2 parts with all the possible byte offsets to + # locate the split so has to test for particular boundary cases + for mark in range(size): + f.seek(0) + X_0, y_0, q_0 = load_svmlight_file( + f, n_features=n_features, query_id=True, offset=0, length=mark + ) + X_1, y_1, q_1 = load_svmlight_file( + f, n_features=n_features, query_id=True, offset=mark, length=-1 + ) + q_concat = np.concatenate([q_0, q_1]) + y_concat = np.concatenate([y_0, y_1]) + X_concat = sp.vstack([X_0, X_1]) + assert_array_almost_equal(y, y_concat) + assert_array_equal(query_id, q_concat) + assert_array_almost_equal(X.toarray(), X_concat.toarray()) + + +def test_load_with_offsets_error(): + with pytest.raises(ValueError, match="n_features is required"): + _load_svmlight_local_test_file(datafile, offset=3, length=3) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_multilabel_y_explicit_zeros(tmp_path, csr_container): + """ + Ensure that if y contains explicit zeros (i.e. elements of y.data equal to + 0) then those explicit zeros are not encoded. + """ + save_path = str(tmp_path / "svm_explicit_zero") + rng = np.random.RandomState(42) + X = rng.randn(3, 5).astype(np.float64) + indptr = np.array([0, 2, 3, 6]) + indices = np.array([0, 2, 2, 0, 1, 2]) + # The first and last element are explicit zeros. + data = np.array([0, 1, 1, 1, 1, 0]) + y = csr_container((data, indices, indptr), shape=(3, 3)) + # y as a dense array would look like + # [[0, 0, 1], + # [0, 0, 1], + # [1, 1, 0]] + + dump_svmlight_file(X, y, save_path, multilabel=True) + + _, y_load = load_svmlight_file(save_path, multilabel=True) + y_true = [(2.0,), (2.0,), (0.0, 1.0)] + assert y_load == y_true + + +def test_dump_read_only(tmp_path): + """Ensure that there is no ValueError when dumping a read-only `X`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28026 + """ + rng = np.random.RandomState(42) + X = rng.randn(5, 2) + y = rng.randn(5) + + # Convert to memmap-backed which are read-only + X, y = create_memmap_backed_data([X, y]) + + save_path = str(tmp_path / "svm_read_only") + dump_svmlight_file(X, y, save_path)