diff --git a/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..62b0fe3b0777cbb6b307f9238e5fda91023a50a0 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a62e2b38d4b05a49cb79f73688c63fcd392d525e9680342e58032b19c6a08cf0 +size 33555533 diff --git a/ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..5d05a8d7f70c1e9265d225dae60e862fd7315a01 --- /dev/null +++ b/ckpts/universal/global_step40/zero/25.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eb69a0f83463d456b09d9f04406518c181fcd4dab9472d093e2bab259b7e895 +size 9372 diff --git a/ckpts/universal/global_step40/zero/25.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/25.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..67e4e399faf79e7ea7961c61328b8bc3d84a7abb --- /dev/null +++ b/ckpts/universal/global_step40/zero/25.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:134cf9a96301b45e9a70aad00df044ad3af56a473c6b15b634f1f381fb382427 +size 9293 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/__init__.py b/venv/lib/python3.10/site-packages/sklearn/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae7902f3365c6087c9c662f2c6d676c9a0e4254 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/__init__.py @@ -0,0 +1,162 @@ +""" +The :mod:`sklearn.datasets` module includes utilities to load datasets, +including methods to load and fetch popular reference datasets. It also +features some artificial data generators. 
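+
+For example, one of the bundled loaders can be used without any download
+(a minimal sketch using :func:`load_iris`, which ships with scikit-learn)::
+
+    from sklearn.datasets import load_iris
+
+    X, y = load_iris(return_X_y=True)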
+""" +import textwrap + +from ._base import ( + clear_data_home, + get_data_home, + load_breast_cancer, + load_diabetes, + load_digits, + load_files, + load_iris, + load_linnerud, + load_sample_image, + load_sample_images, + load_wine, +) +from ._california_housing import fetch_california_housing +from ._covtype import fetch_covtype +from ._kddcup99 import fetch_kddcup99 +from ._lfw import fetch_lfw_pairs, fetch_lfw_people +from ._olivetti_faces import fetch_olivetti_faces +from ._openml import fetch_openml +from ._rcv1 import fetch_rcv1 +from ._samples_generator import ( + make_biclusters, + make_blobs, + make_checkerboard, + make_circles, + make_classification, + make_friedman1, + make_friedman2, + make_friedman3, + make_gaussian_quantiles, + make_hastie_10_2, + make_low_rank_matrix, + make_moons, + make_multilabel_classification, + make_regression, + make_s_curve, + make_sparse_coded_signal, + make_sparse_spd_matrix, + make_sparse_uncorrelated, + make_spd_matrix, + make_swiss_roll, +) +from ._species_distributions import fetch_species_distributions +from ._svmlight_format_io import ( + dump_svmlight_file, + load_svmlight_file, + load_svmlight_files, +) +from ._twenty_newsgroups import fetch_20newsgroups, fetch_20newsgroups_vectorized + +__all__ = [ + "clear_data_home", + "dump_svmlight_file", + "fetch_20newsgroups", + "fetch_20newsgroups_vectorized", + "fetch_lfw_pairs", + "fetch_lfw_people", + "fetch_olivetti_faces", + "fetch_species_distributions", + "fetch_california_housing", + "fetch_covtype", + "fetch_rcv1", + "fetch_kddcup99", + "fetch_openml", + "get_data_home", + "load_diabetes", + "load_digits", + "load_files", + "load_iris", + "load_breast_cancer", + "load_linnerud", + "load_sample_image", + "load_sample_images", + "load_svmlight_file", + "load_svmlight_files", + "load_wine", + "make_biclusters", + "make_blobs", + "make_circles", + "make_classification", + "make_checkerboard", + "make_friedman1", + "make_friedman2", + "make_friedman3", + "make_gaussian_quantiles", + "make_hastie_10_2", + "make_low_rank_matrix", + "make_moons", + "make_multilabel_classification", + "make_regression", + "make_s_curve", + "make_sparse_coded_signal", + "make_sparse_spd_matrix", + "make_sparse_uncorrelated", + "make_spd_matrix", + "make_swiss_roll", +] + + +def __getattr__(name): + if name == "load_boston": + msg = textwrap.dedent(""" + `load_boston` has been removed from scikit-learn since version 1.2. + + The Boston housing prices dataset has an ethical problem: as + investigated in [1], the authors of this dataset engineered a + non-invertible variable "B" assuming that racial self-segregation had a + positive impact on house prices [2]. Furthermore the goal of the + research that led to the creation of this dataset was to study the + impact of air quality but it did not give adequate demonstration of the + validity of this assumption. + + The scikit-learn maintainers therefore strongly discourage the use of + this dataset unless the purpose of the code is to study and educate + about ethical issues in data science and machine learning. + + In this special case, you can fetch the dataset from the original + source:: + + import pandas as pd + import numpy as np + + data_url = "http://lib.stat.cmu.edu/datasets/boston" + raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None) + data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]]) + target = raw_df.values[1::2, 2] + + Alternative datasets include the California housing dataset and the + Ames housing dataset. 
You can load the datasets as follows:: + + from sklearn.datasets import fetch_california_housing + housing = fetch_california_housing() + + for the California housing dataset and:: + + from sklearn.datasets import fetch_openml + housing = fetch_openml(name="house_prices", as_frame=True) + + for the Ames housing dataset. + + [1] M Carlisle. + "Racist data destruction?" + + + [2] Harrison Jr, David, and Daniel L. Rubinfeld. + "Hedonic housing prices and the demand for clean air." + Journal of environmental economics and management 5.1 (1978): 81-102. + + """) + raise ImportError(msg) + try: + return globals()[name] + except KeyError: + # This is turned into the appropriate ImportError + raise AttributeError diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py new file mode 100644 index 0000000000000000000000000000000000000000..a8a889fa8ce1de1a84697e64ca999b385e878d50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_california_housing.py @@ -0,0 +1,223 @@ +"""California housing dataset. + +The original database is available from StatLib + + http://lib.stat.cmu.edu/datasets/ + +The data contains 20,640 observations on 9 variables. + +This dataset contains the average house value as target variable +and the following input variables (features): average income, +housing average age, average rooms, average bedrooms, population, +average occupation, latitude, and longitude in that order. + +References +---------- + +Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions, +Statistics and Probability Letters, 33 (1997) 291-297. + +""" +# Authors: Peter Prettenhofer +# License: BSD 3 clause + +import logging +import tarfile +from os import PathLike, makedirs, remove +from os.path import exists + +import joblib +import numpy as np + +from ..utils import Bunch +from ..utils._param_validation import validate_params +from . import get_data_home +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + _pkl_filepath, + load_descr, +) + +# The original data can be found at: +# https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz +ARCHIVE = RemoteFileMetadata( + filename="cal_housing.tgz", + url="https://ndownloader.figshare.com/files/5976036", + checksum="aaa5c9a6afe2225cc2aed2723682ae403280c4a3695a2ddda4ffb5d8215ea681", +) + +logger = logging.getLogger(__name__) + + +@validate_params( + { + "data_home": [str, PathLike, None], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_california_housing( + *, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False +): + """Load the California housing dataset (regression). + + ============== ============== + Samples total 20640 + Dimensionality 8 + Features real + Target real 0.15 - 5. + ============== ============== + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(data.data, data.target)`` instead of a Bunch + object. + + .. 
versionadded:: 0.20 + + as_frame : bool, default=False + If True, the data is a pandas DataFrame including columns with + appropriate dtypes (numeric, string or categorical). The target is + a pandas DataFrame or Series depending on the number of target_columns. + + .. versionadded:: 0.23 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray, shape (20640, 8) + Each row corresponding to the 8 feature values in order. + If ``as_frame`` is True, ``data`` is a pandas object. + target : numpy array of shape (20640,) + Each value corresponds to the average + house value in units of 100,000. + If ``as_frame`` is True, ``target`` is a pandas object. + feature_names : list of length 8 + Array of ordered feature names used in the dataset. + DESCR : str + Description of the California housing dataset. + frame : pandas DataFrame + Only present when `as_frame=True`. DataFrame with ``data`` and + ``target``. + + .. versionadded:: 0.23 + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + + Notes + ----- + + This dataset consists of 20,640 samples and 9 features. + + Examples + -------- + >>> from sklearn.datasets import fetch_california_housing + >>> housing = fetch_california_housing() + >>> print(housing.data.shape, housing.target.shape) + (20640, 8) (20640,) + >>> print(housing.feature_names[0:6]) + ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup'] + """ + data_home = get_data_home(data_home=data_home) + if not exists(data_home): + makedirs(data_home) + + filepath = _pkl_filepath(data_home, "cal_housing.pkz") + if not exists(filepath): + if not download_if_missing: + raise OSError("Data not found and `download_if_missing` is False") + + logger.info( + "Downloading Cal. 
housing from {} to {}".format(ARCHIVE.url, data_home) + ) + + archive_path = _fetch_remote(ARCHIVE, dirname=data_home) + + with tarfile.open(mode="r:gz", name=archive_path) as f: + cal_housing = np.loadtxt( + f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter="," + ) + # Columns are not in the same order compared to the previous + # URL resource on lib.stat.cmu.edu + columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0] + cal_housing = cal_housing[:, columns_index] + + joblib.dump(cal_housing, filepath, compress=6) + remove(archive_path) + + else: + cal_housing = joblib.load(filepath) + + feature_names = [ + "MedInc", + "HouseAge", + "AveRooms", + "AveBedrms", + "Population", + "AveOccup", + "Latitude", + "Longitude", + ] + + target, data = cal_housing[:, 0], cal_housing[:, 1:] + + # avg rooms = total rooms / households + data[:, 2] /= data[:, 5] + + # avg bed rooms = total bed rooms / households + data[:, 3] /= data[:, 5] + + # avg occupancy = population / households + data[:, 5] = data[:, 4] / data[:, 5] + + # target in units of 100,000 + target = target / 100000.0 + + descr = load_descr("california_housing.rst") + + X = data + y = target + + frame = None + target_names = [ + "MedHouseVal", + ] + if as_frame: + frame, X, y = _convert_data_dataframe( + "fetch_california_housing", data, target, feature_names, target_names + ) + + if return_X_y: + return X, y + + return Bunch( + data=X, + target=y, + frame=frame, + target_names=target_names, + feature_names=feature_names, + DESCR=descr, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py new file mode 100644 index 0000000000000000000000000000000000000000..444bd01737901f0b2fa791c2f7e80b3762d40dc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_kddcup99.py @@ -0,0 +1,401 @@ +"""KDDCUP 99 dataset. + +A classic dataset for anomaly detection. + +The dataset page is available from UCI Machine Learning Repository + +https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz + +""" + +import errno +import logging +import os +from gzip import GzipFile +from os.path import exists, join + +import joblib +import numpy as np + +from ..utils import Bunch, check_random_state +from ..utils import shuffle as shuffle_method +from ..utils._param_validation import StrOptions, validate_params +from . 
import get_data_home +from ._base import ( + RemoteFileMetadata, + _convert_data_dataframe, + _fetch_remote, + load_descr, +) + +# The original data can be found at: +# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz +ARCHIVE = RemoteFileMetadata( + filename="kddcup99_data", + url="https://ndownloader.figshare.com/files/5976045", + checksum="3b6c942aa0356c0ca35b7b595a26c89d343652c9db428893e7494f837b274292", +) + +# The original data can be found at: +# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz +ARCHIVE_10_PERCENT = RemoteFileMetadata( + filename="kddcup99_10_data", + url="https://ndownloader.figshare.com/files/5976042", + checksum="8045aca0d84e70e622d1148d7df782496f6333bf6eb979a1b0837c42a9fd9561", +) + +logger = logging.getLogger(__name__) + + +@validate_params( + { + "subset": [StrOptions({"SA", "SF", "http", "smtp"}), None], + "data_home": [str, os.PathLike, None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "percent10": ["boolean"], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + "as_frame": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_kddcup99( + *, + subset=None, + data_home=None, + shuffle=False, + random_state=None, + percent10=True, + download_if_missing=True, + return_X_y=False, + as_frame=False, +): + """Load the kddcup99 dataset (classification). + + Download it if necessary. + + ================= ==================================== + Classes 23 + Samples total 4898431 + Dimensionality 41 + Features discrete (int) or continuous (float) + ================= ==================================== + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + subset : {'SA', 'SF', 'http', 'smtp'}, default=None + To return the corresponding classical subsets of kddcup 99. + If None, return the entire kddcup 99 dataset. + + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + .. versionadded:: 0.19 + + shuffle : bool, default=False + Whether to shuffle dataset. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling and for + selection of abnormal samples if `subset='SA'`. Pass an int for + reproducible output across multiple function calls. + See :term:`Glossary `. + + percent10 : bool, default=True + Whether to load only 10 percent of the data. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(data, target)`` instead of a Bunch object. See + below for more information about the `data` and `target` object. + + .. versionadded:: 0.20 + + as_frame : bool, default=False + If `True`, returns a pandas Dataframe for the ``data`` and ``target`` + objects in the `Bunch` returned object; `Bunch` return object will also + have a ``frame`` member. + + .. versionadded:: 0.24 + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : {ndarray, dataframe} of shape (494021, 41) + The data matrix to learn. If `as_frame=True`, `data` will be a + pandas DataFrame. + target : {ndarray, series} of shape (494021,) + The regression target for each sample. 
If `as_frame=True`, `target` + will be a pandas Series. + frame : dataframe of shape (494021, 42) + Only present when `as_frame=True`. Contains `data` and `target`. + DESCR : str + The full description of the dataset. + feature_names : list + The names of the dataset columns + target_names: list + The names of the target columns + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + """ + data_home = get_data_home(data_home=data_home) + kddcup99 = _fetch_brute_kddcup99( + data_home=data_home, + percent10=percent10, + download_if_missing=download_if_missing, + ) + + data = kddcup99.data + target = kddcup99.target + feature_names = kddcup99.feature_names + target_names = kddcup99.target_names + + if subset == "SA": + s = target == b"normal." + t = np.logical_not(s) + normal_samples = data[s, :] + normal_targets = target[s] + abnormal_samples = data[t, :] + abnormal_targets = target[t] + + n_samples_abnormal = abnormal_samples.shape[0] + # selected abnormal samples: + random_state = check_random_state(random_state) + r = random_state.randint(0, n_samples_abnormal, 3377) + abnormal_samples = abnormal_samples[r] + abnormal_targets = abnormal_targets[r] + + data = np.r_[normal_samples, abnormal_samples] + target = np.r_[normal_targets, abnormal_targets] + + if subset == "SF" or subset == "http" or subset == "smtp": + # select all samples with positive logged_in attribute: + s = data[:, 11] == 1 + data = np.c_[data[s, :11], data[s, 12:]] + feature_names = feature_names[:11] + feature_names[12:] + target = target[s] + + data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False)) + data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False)) + data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False)) + + if subset == "http": + s = data[:, 2] == b"http" + data = data[s] + target = target[s] + data = np.c_[data[:, 0], data[:, 4], data[:, 5]] + feature_names = [feature_names[0], feature_names[4], feature_names[5]] + + if subset == "smtp": + s = data[:, 2] == b"smtp" + data = data[s] + target = target[s] + data = np.c_[data[:, 0], data[:, 4], data[:, 5]] + feature_names = [feature_names[0], feature_names[4], feature_names[5]] + + if subset == "SF": + data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]] + feature_names = [ + feature_names[0], + feature_names[2], + feature_names[4], + feature_names[5], + ] + + if shuffle: + data, target = shuffle_method(data, target, random_state=random_state) + + fdescr = load_descr("kddcup99.rst") + + frame = None + if as_frame: + frame, data, target = _convert_data_dataframe( + "fetch_kddcup99", data, target, feature_names, target_names + ) + + if return_X_y: + return data, target + + return Bunch( + data=data, + target=target, + frame=frame, + target_names=target_names, + feature_names=feature_names, + DESCR=fdescr, + ) + + +def _fetch_brute_kddcup99(data_home=None, download_if_missing=True, percent10=True): + """Load the kddcup99 dataset, downloading it if necessary. + + Parameters + ---------- + data_home : str, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. 
+ + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + percent10 : bool, default=True + Whether to load only 10 percent of the data. + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray of shape (494021, 41) + Each row corresponds to the 41 features in the dataset. + target : ndarray of shape (494021,) + Each value corresponds to one of the 21 attack types or to the + label 'normal.'. + feature_names : list + The names of the dataset columns + target_names: list + The names of the target columns + DESCR : str + Description of the kddcup99 dataset. + + """ + + data_home = get_data_home(data_home=data_home) + dir_suffix = "-py3" + + if percent10: + kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix) + archive = ARCHIVE_10_PERCENT + else: + kddcup_dir = join(data_home, "kddcup99" + dir_suffix) + archive = ARCHIVE + + samples_path = join(kddcup_dir, "samples") + targets_path = join(kddcup_dir, "targets") + available = exists(samples_path) + + dt = [ + ("duration", int), + ("protocol_type", "S4"), + ("service", "S11"), + ("flag", "S6"), + ("src_bytes", int), + ("dst_bytes", int), + ("land", int), + ("wrong_fragment", int), + ("urgent", int), + ("hot", int), + ("num_failed_logins", int), + ("logged_in", int), + ("num_compromised", int), + ("root_shell", int), + ("su_attempted", int), + ("num_root", int), + ("num_file_creations", int), + ("num_shells", int), + ("num_access_files", int), + ("num_outbound_cmds", int), + ("is_host_login", int), + ("is_guest_login", int), + ("count", int), + ("srv_count", int), + ("serror_rate", float), + ("srv_serror_rate", float), + ("rerror_rate", float), + ("srv_rerror_rate", float), + ("same_srv_rate", float), + ("diff_srv_rate", float), + ("srv_diff_host_rate", float), + ("dst_host_count", int), + ("dst_host_srv_count", int), + ("dst_host_same_srv_rate", float), + ("dst_host_diff_srv_rate", float), + ("dst_host_same_src_port_rate", float), + ("dst_host_srv_diff_host_rate", float), + ("dst_host_serror_rate", float), + ("dst_host_srv_serror_rate", float), + ("dst_host_rerror_rate", float), + ("dst_host_srv_rerror_rate", float), + ("labels", "S16"), + ] + + column_names = [c[0] for c in dt] + target_names = column_names[-1] + feature_names = column_names[:-1] + + if available: + try: + X = joblib.load(samples_path) + y = joblib.load(targets_path) + except Exception as e: + raise OSError( + "The cache for fetch_kddcup99 is invalid, please delete " + f"{str(kddcup_dir)} and run the fetch_kddcup99 again" + ) from e + + elif download_if_missing: + _mkdirp(kddcup_dir) + logger.info("Downloading %s" % archive.url) + _fetch_remote(archive, dirname=kddcup_dir) + DT = np.dtype(dt) + logger.debug("extracting archive") + archive_path = join(kddcup_dir, archive.filename) + file_ = GzipFile(filename=archive_path, mode="r") + Xy = [] + for line in file_.readlines(): + line = line.decode() + Xy.append(line.replace("\n", "").split(",")) + file_.close() + logger.debug("extraction done") + os.remove(archive_path) + + Xy = np.asarray(Xy, dtype=object) + for j in range(42): + Xy[:, j] = Xy[:, j].astype(DT[j]) + + X = Xy[:, :-1] + y = Xy[:, -1] + # XXX bug when compress!=0: + # (error: 'Incorrect data length while decompressing[...] 
the file + # could be corrupted.') + + joblib.dump(X, samples_path, compress=0) + joblib.dump(y, targets_path, compress=0) + else: + raise OSError("Data not found and `download_if_missing` is False") + + return Bunch( + data=X, + target=y, + feature_names=feature_names, + target_names=[target_names], + ) + + +def _mkdirp(d): + """Ensure directory d exists (like mkdir -p on Unix) + No guarantee that the directory is writable. + """ + try: + os.makedirs(d) + except OSError as e: + if e.errno != errno.EEXIST: + raise diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_lfw.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_lfw.py new file mode 100644 index 0000000000000000000000000000000000000000..d06d29f21d0a5c79b96fc65eb6c998e6a74a67b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_lfw.py @@ -0,0 +1,570 @@ +"""Labeled Faces in the Wild (LFW) dataset + +This dataset is a collection of JPEG pictures of famous people collected +over the internet, all details are available on the official website: + + http://vis-www.cs.umass.edu/lfw/ +""" +# Copyright (c) 2011 Olivier Grisel +# License: BSD 3 clause + +import logging +from numbers import Integral, Real +from os import PathLike, listdir, makedirs, remove +from os.path import exists, isdir, join + +import numpy as np +from joblib import Memory + +from ..utils import Bunch +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ._base import ( + RemoteFileMetadata, + _fetch_remote, + get_data_home, + load_descr, +) + +logger = logging.getLogger(__name__) + +# The original data can be found in: +# http://vis-www.cs.umass.edu/lfw/lfw.tgz +ARCHIVE = RemoteFileMetadata( + filename="lfw.tgz", + url="https://ndownloader.figshare.com/files/5976018", + checksum="055f7d9c632d7370e6fb4afc7468d40f970c34a80d4c6f50ffec63f5a8d536c0", +) + +# The original funneled data can be found in: +# http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz +FUNNELED_ARCHIVE = RemoteFileMetadata( + filename="lfw-funneled.tgz", + url="https://ndownloader.figshare.com/files/5976015", + checksum="b47c8422c8cded889dc5a13418c4bc2abbda121092b3533a83306f90d900100a", +) + +# The original target data can be found in: +# http://vis-www.cs.umass.edu/lfw/pairsDevTrain.txt', +# http://vis-www.cs.umass.edu/lfw/pairsDevTest.txt', +# http://vis-www.cs.umass.edu/lfw/pairs.txt', +TARGETS = ( + RemoteFileMetadata( + filename="pairsDevTrain.txt", + url="https://ndownloader.figshare.com/files/5976012", + checksum="1d454dada7dfeca0e7eab6f65dc4e97a6312d44cf142207be28d688be92aabfa", + ), + RemoteFileMetadata( + filename="pairsDevTest.txt", + url="https://ndownloader.figshare.com/files/5976009", + checksum="7cb06600ea8b2814ac26e946201cdb304296262aad67d046a16a7ec85d0ff87c", + ), + RemoteFileMetadata( + filename="pairs.txt", + url="https://ndownloader.figshare.com/files/5976006", + checksum="ea42330c62c92989f9d7c03237ed5d591365e89b3e649747777b70e692dc1592", + ), +) + + +# +# Common private utilities for data fetching from the original LFW website +# local disk caching, and image decoding. 
+# + + +def _check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True): + """Helper function to download any missing LFW data""" + + data_home = get_data_home(data_home=data_home) + lfw_home = join(data_home, "lfw_home") + + if not exists(lfw_home): + makedirs(lfw_home) + + for target in TARGETS: + target_filepath = join(lfw_home, target.filename) + if not exists(target_filepath): + if download_if_missing: + logger.info("Downloading LFW metadata: %s", target.url) + _fetch_remote(target, dirname=lfw_home) + else: + raise OSError("%s is missing" % target_filepath) + + if funneled: + data_folder_path = join(lfw_home, "lfw_funneled") + archive = FUNNELED_ARCHIVE + else: + data_folder_path = join(lfw_home, "lfw") + archive = ARCHIVE + + if not exists(data_folder_path): + archive_path = join(lfw_home, archive.filename) + if not exists(archive_path): + if download_if_missing: + logger.info("Downloading LFW data (~200MB): %s", archive.url) + _fetch_remote(archive, dirname=lfw_home) + else: + raise OSError("%s is missing" % archive_path) + + import tarfile + + logger.debug("Decompressing the data archive to %s", data_folder_path) + tarfile.open(archive_path, "r:gz").extractall(path=lfw_home) + remove(archive_path) + + return lfw_home, data_folder_path + + +def _load_imgs(file_paths, slice_, color, resize): + """Internally used to load images""" + try: + from PIL import Image + except ImportError: + raise ImportError( + "The Python Imaging Library (PIL) is required to load data " + "from jpeg files. Please refer to " + "https://pillow.readthedocs.io/en/stable/installation.html " + "for installing PIL." + ) + + # compute the portion of the images to load to respect the slice_ parameter + # given by the caller + default_slice = (slice(0, 250), slice(0, 250)) + if slice_ is None: + slice_ = default_slice + else: + slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice)) + + h_slice, w_slice = slice_ + h = (h_slice.stop - h_slice.start) // (h_slice.step or 1) + w = (w_slice.stop - w_slice.start) // (w_slice.step or 1) + + if resize is not None: + resize = float(resize) + h = int(resize * h) + w = int(resize * w) + + # allocate some contiguous memory to host the decoded image slices + n_faces = len(file_paths) + if not color: + faces = np.zeros((n_faces, h, w), dtype=np.float32) + else: + faces = np.zeros((n_faces, h, w, 3), dtype=np.float32) + + # iterate over the collected file path to load the jpeg files as numpy + # arrays + for i, file_path in enumerate(file_paths): + if i % 1000 == 0: + logger.debug("Loading face #%05d / %05d", i + 1, n_faces) + + # Checks if jpeg reading worked. Refer to issue #3594 for more + # details. + pil_img = Image.open(file_path) + pil_img = pil_img.crop( + (w_slice.start, h_slice.start, w_slice.stop, h_slice.stop) + ) + if resize is not None: + pil_img = pil_img.resize((w, h)) + face = np.asarray(pil_img, dtype=np.float32) + + if face.ndim == 0: + raise RuntimeError( + "Failed to read the image file %s, " + "Please make sure that libjpeg is installed" % file_path + ) + + face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats + if not color: + # average the color channels to compute a gray levels + # representation + face = face.mean(axis=2) + + faces[i, ...] 
= face + + return faces + + +# +# Task #1: Face Identification on picture with names +# + + +def _fetch_lfw_people( + data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0 +): + """Perform the actual data loading for the lfw people dataset + + This operation is meant to be cached by a joblib wrapper. + """ + # scan the data folder content to retain people with more that + # `min_faces_per_person` face pictures + person_names, file_paths = [], [] + for person_name in sorted(listdir(data_folder_path)): + folder_path = join(data_folder_path, person_name) + if not isdir(folder_path): + continue + paths = [join(folder_path, f) for f in sorted(listdir(folder_path))] + n_pictures = len(paths) + if n_pictures >= min_faces_per_person: + person_name = person_name.replace("_", " ") + person_names.extend([person_name] * n_pictures) + file_paths.extend(paths) + + n_faces = len(file_paths) + if n_faces == 0: + raise ValueError( + "min_faces_per_person=%d is too restrictive" % min_faces_per_person + ) + + target_names = np.unique(person_names) + target = np.searchsorted(target_names, person_names) + + faces = _load_imgs(file_paths, slice_, color, resize) + + # shuffle the faces with a deterministic RNG scheme to avoid having + # all faces of the same person in a row, as it would break some + # cross validation and learning algorithms such as SGD and online + # k-means that make an IID assumption + + indices = np.arange(n_faces) + np.random.RandomState(42).shuffle(indices) + faces, target = faces[indices], target[indices] + return faces, target, target_names + + +@validate_params( + { + "data_home": [str, PathLike, None], + "funneled": ["boolean"], + "resize": [Interval(Real, 0, None, closed="neither"), None], + "min_faces_per_person": [Interval(Integral, 0, None, closed="left"), None], + "color": ["boolean"], + "slice_": [tuple, Hidden(None)], + "download_if_missing": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_lfw_people( + *, + data_home=None, + funneled=True, + resize=0.5, + min_faces_per_person=0, + color=False, + slice_=(slice(70, 195), slice(78, 172)), + download_if_missing=True, + return_X_y=False, +): + """Load the Labeled Faces in the Wild (LFW) people dataset \ +(classification). + + Download it if necessary. + + ================= ======================= + Classes 5749 + Samples total 13233 + Dimensionality 5828 + Features real, between 0 and 255 + ================= ======================= + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + funneled : bool, default=True + Download and use the funneled variant of the dataset. + + resize : float or None, default=0.5 + Ratio used to resize the each face picture. If `None`, no resizing is + performed. + + min_faces_per_person : int, default=None + The extracted dataset will only retain pictures of people that have at + least `min_faces_per_person` different pictures. + + color : bool, default=False + Keep the 3 RGB channels instead of averaging them to a single + gray level channel. If color is True the shape of the data has + one more dimension than the shape with color = False. 
+ + slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) + Provide a custom 2D slice (height, width) to extract the + 'interesting' part of the jpeg files and avoid use statistical + correlation from the background. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + return_X_y : bool, default=False + If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch + object. See below for more information about the `dataset.data` and + `dataset.target` object. + + .. versionadded:: 0.20 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : numpy array of shape (13233, 2914) + Each row corresponds to a ravelled face image + of original size 62 x 47 pixels. + Changing the ``slice_`` or resize parameters will change the + shape of the output. + images : numpy array of shape (13233, 62, 47) + Each row is a face image corresponding to one of the 5749 people in + the dataset. Changing the ``slice_`` + or resize parameters will change the shape of the output. + target : numpy array of shape (13233,) + Labels associated to each face image. + Those labels range from 0-5748 and correspond to the person IDs. + target_names : numpy array of shape (5749,) + Names of all persons in the dataset. + Position in array corresponds to the person ID in the target array. + DESCR : str + Description of the Labeled Faces in the Wild (LFW) dataset. + + (data, target) : tuple if ``return_X_y`` is True + A tuple of two ndarray. The first containing a 2D array of + shape (n_samples, n_features) with each row representing one + sample and each column representing the features. The second + ndarray of shape (n_samples,) containing the target samples. + + .. versionadded:: 0.20 + """ + lfw_home, data_folder_path = _check_fetch_lfw( + data_home=data_home, funneled=funneled, download_if_missing=download_if_missing + ) + logger.debug("Loading LFW people faces from %s", lfw_home) + + # wrap the loader in a memoizing function that will return memmaped data + # arrays for optimal memory usage + m = Memory(location=lfw_home, compress=6, verbose=0) + load_func = m.cache(_fetch_lfw_people) + + # load and memoize the pairs as np arrays + faces, target, target_names = load_func( + data_folder_path, + resize=resize, + min_faces_per_person=min_faces_per_person, + color=color, + slice_=slice_, + ) + + X = faces.reshape(len(faces), -1) + + fdescr = load_descr("lfw.rst") + + if return_X_y: + return X, target + + # pack the results as a Bunch instance + return Bunch( + data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr + ) + + +# +# Task #2: Face Verification on pairs of face pictures +# + + +def _fetch_lfw_pairs( + index_file_path, data_folder_path, slice_=None, color=False, resize=None +): + """Perform the actual data loading for the LFW pairs dataset + + This operation is meant to be cached by a joblib wrapper. 
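+
+    The public :func:`fetch_lfw_pairs` below wires this caching up roughly as
+    follows (``lfw_home`` being the local cache directory resolved by
+    :func:`_check_fetch_lfw`)::
+
+        from joblib import Memory
+
+        m = Memory(location=lfw_home, compress=6, verbose=0)
+        load_func = m.cache(_fetch_lfw_pairs)
+        pairs, target, target_names = load_func(index_file_path, data_folder_path)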
+ """ + # parse the index file to find the number of pairs to be able to allocate + # the right amount of memory before starting to decode the jpeg files + with open(index_file_path, "rb") as index_file: + split_lines = [ln.decode().strip().split("\t") for ln in index_file] + pair_specs = [sl for sl in split_lines if len(sl) > 2] + n_pairs = len(pair_specs) + + # iterating over the metadata lines for each pair to find the filename to + # decode and load in memory + target = np.zeros(n_pairs, dtype=int) + file_paths = list() + for i, components in enumerate(pair_specs): + if len(components) == 3: + target[i] = 1 + pair = ( + (components[0], int(components[1]) - 1), + (components[0], int(components[2]) - 1), + ) + elif len(components) == 4: + target[i] = 0 + pair = ( + (components[0], int(components[1]) - 1), + (components[2], int(components[3]) - 1), + ) + else: + raise ValueError("invalid line %d: %r" % (i + 1, components)) + for j, (name, idx) in enumerate(pair): + try: + person_folder = join(data_folder_path, name) + except TypeError: + person_folder = join(data_folder_path, str(name, "UTF-8")) + filenames = list(sorted(listdir(person_folder))) + file_path = join(person_folder, filenames[idx]) + file_paths.append(file_path) + + pairs = _load_imgs(file_paths, slice_, color, resize) + shape = list(pairs.shape) + n_faces = shape.pop(0) + shape.insert(0, 2) + shape.insert(0, n_faces // 2) + pairs.shape = shape + + return pairs, target, np.array(["Different persons", "Same person"]) + + +@validate_params( + { + "subset": [StrOptions({"train", "test", "10_folds"})], + "data_home": [str, PathLike, None], + "funneled": ["boolean"], + "resize": [Interval(Real, 0, None, closed="neither"), None], + "color": ["boolean"], + "slice_": [tuple, Hidden(None)], + "download_if_missing": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_lfw_pairs( + *, + subset="train", + data_home=None, + funneled=True, + resize=0.5, + color=False, + slice_=(slice(70, 195), slice(78, 172)), + download_if_missing=True, +): + """Load the Labeled Faces in the Wild (LFW) pairs dataset (classification). + + Download it if necessary. + + ================= ======================= + Classes 2 + Samples total 13233 + Dimensionality 5828 + Features real, between 0 and 255 + ================= ======================= + + In the official `README.txt`_ this task is described as the + "Restricted" task. As I am not sure as to implement the + "Unrestricted" variant correctly, I left it as unsupported for now. + + .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt + + The original images are 250 x 250 pixels, but the default slice and resize + arguments reduce them to 62 x 47. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + subset : {'train', 'test', '10_folds'}, default='train' + Select the dataset to load: 'train' for the development training + set, 'test' for the development test set, and '10_folds' for the + official evaluation set that is meant to be used with a 10-folds + cross validation. + + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By + default all scikit-learn data is stored in '~/scikit_learn_data' + subfolders. + + funneled : bool, default=True + Download and use the funneled variant of the dataset. + + resize : float, default=0.5 + Ratio used to resize the each face picture. + + color : bool, default=False + Keep the 3 RGB channels instead of averaging them to a single + gray level channel. 
If color is True the shape of the data has + one more dimension than the shape with color = False. + + slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) + Provide a custom 2D slice (height, width) to extract the + 'interesting' part of the jpeg files and avoid use statistical + correlation from the background. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + data : ndarray of shape (2200, 5828). Shape depends on ``subset``. + Each row corresponds to 2 ravel'd face images + of original size 62 x 47 pixels. + Changing the ``slice_``, ``resize`` or ``subset`` parameters + will change the shape of the output. + pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset`` + Each row has 2 face images corresponding + to same or different person from the dataset + containing 5749 people. Changing the ``slice_``, + ``resize`` or ``subset`` parameters will change the shape of the + output. + target : numpy array of shape (2200,). Shape depends on ``subset``. + Labels associated to each pair of images. + The two label values being different persons or the same person. + target_names : numpy array of shape (2,) + Explains the target values of the target array. + 0 corresponds to "Different person", 1 corresponds to "same person". + DESCR : str + Description of the Labeled Faces in the Wild (LFW) dataset. + """ + lfw_home, data_folder_path = _check_fetch_lfw( + data_home=data_home, funneled=funneled, download_if_missing=download_if_missing + ) + logger.debug("Loading %s LFW pairs from %s", subset, lfw_home) + + # wrap the loader in a memoizing function that will return memmaped data + # arrays for optimal memory usage + m = Memory(location=lfw_home, compress=6, verbose=0) + load_func = m.cache(_fetch_lfw_pairs) + + # select the right metadata file according to the requested subset + label_filenames = { + "train": "pairsDevTrain.txt", + "test": "pairsDevTest.txt", + "10_folds": "pairs.txt", + } + if subset not in label_filenames: + raise ValueError( + "subset='%s' is invalid: should be one of %r" + % (subset, list(sorted(label_filenames.keys()))) + ) + index_file_path = join(lfw_home, label_filenames[subset]) + + # load and memoize the pairs as np arrays + pairs, target, target_names = load_func( + index_file_path, data_folder_path, resize=resize, color=color, slice_=slice_ + ) + + fdescr = load_descr("lfw.rst") + + # pack the results as a Bunch instance + return Bunch( + data=pairs.reshape(len(pairs), -1), + pairs=pairs, + target=target, + target_names=target_names, + DESCR=fdescr, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py new file mode 100644 index 0000000000000000000000000000000000000000..d9f392d872216d7b420d8ddf34ffdd71db6527e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_rcv1.py @@ -0,0 +1,306 @@ +"""RCV1 dataset. 
+ +The dataset page is available at + + http://jmlr.csail.mit.edu/papers/volume5/lewis04a/ +""" + +# Author: Tom Dupre la Tour +# License: BSD 3 clause + +import logging +from gzip import GzipFile +from os import PathLike, makedirs, remove +from os.path import exists, join + +import joblib +import numpy as np +import scipy.sparse as sp + +from ..utils import Bunch +from ..utils import shuffle as shuffle_ +from ..utils._param_validation import StrOptions, validate_params +from . import get_data_home +from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath, load_descr +from ._svmlight_format_io import load_svmlight_files + +# The original vectorized data can be found at: +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt0.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt1.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt2.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_test_pt3.dat.gz +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/a13-vector-files/lyrl2004_vectors_train.dat.gz +# while the original stemmed token files can be found +# in the README, section B.12.i.: +# http://www.ai.mit.edu/projects/jmlr/papers/volume5/lewis04a/lyrl2004_rcv1v2_README.htm +XY_METADATA = ( + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976069", + checksum="ed40f7e418d10484091b059703eeb95ae3199fe042891dcec4be6696b9968374", + filename="lyrl2004_vectors_test_pt0.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976066", + checksum="87700668ae45d45d5ca1ef6ae9bd81ab0f5ec88cc95dcef9ae7838f727a13aa6", + filename="lyrl2004_vectors_test_pt1.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976063", + checksum="48143ac703cbe33299f7ae9f4995db49a258690f60e5debbff8995c34841c7f5", + filename="lyrl2004_vectors_test_pt2.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976060", + checksum="dfcb0d658311481523c6e6ca0c3f5a3e1d3d12cde5d7a8ce629a9006ec7dbb39", + filename="lyrl2004_vectors_test_pt3.dat.gz", + ), + RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976057", + checksum="5468f656d0ba7a83afc7ad44841cf9a53048a5c083eedc005dcdb5cc768924ae", + filename="lyrl2004_vectors_train.dat.gz", + ), +) + +# The original data can be found at: +# http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a08-topic-qrels/rcv1-v2.topics.qrels.gz +TOPICS_METADATA = RemoteFileMetadata( + url="https://ndownloader.figshare.com/files/5976048", + checksum="2a98e5e5d8b770bded93afc8930d88299474317fe14181aee1466cc754d0d1c1", + filename="rcv1v2.topics.qrels.gz", +) + +logger = logging.getLogger(__name__) + + +@validate_params( + { + "data_home": [str, PathLike, None], + "subset": [StrOptions({"train", "test", "all"})], + "download_if_missing": ["boolean"], + "random_state": ["random_state"], + "shuffle": ["boolean"], + "return_X_y": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fetch_rcv1( + *, + data_home=None, + subset="all", + download_if_missing=True, + random_state=None, + shuffle=False, + return_X_y=False, +): + """Load the RCV1 multilabel dataset (classification). + + Download it if necessary. + + Version: RCV1-v2, vectors, full sets, topics multilabels. 
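+
+    A typical call is sketched below; on the first call the vectorized data
+    are downloaded from the mirrors listed above and cached locally::
+
+        from sklearn.datasets import fetch_rcv1
+
+        rcv1 = fetch_rcv1(subset="train")
+        X, y = rcv1.data, rcv1.target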
+ + ================= ===================== + Classes 103 + Samples total 804414 + Dimensionality 47236 + Features real, between 0 and 1 + ================= ===================== + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.17 + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + subset : {'train', 'test', 'all'}, default='all' + Select the dataset to load: 'train' for the training set + (23149 samples), 'test' for the test set (781265 samples), + 'all' for both, with the training samples first if shuffle is False. + This follows the official LYRL2004 chronological split. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + shuffle : bool, default=False + Whether to shuffle dataset. + + return_X_y : bool, default=False + If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch + object. See below for more information about the `dataset.data` and + `dataset.target` object. + + .. versionadded:: 0.20 + + Returns + ------- + dataset : :class:`~sklearn.utils.Bunch` + Dictionary-like object. Returned only if `return_X_y` is False. + `dataset` has the following attributes: + + - data : sparse matrix of shape (804414, 47236), dtype=np.float64 + The array has 0.16% of non zero values. Will be of CSR format. + - target : sparse matrix of shape (804414, 103), dtype=np.uint8 + Each sample has a value of 1 in its categories, and 0 in others. + The array has 3.15% of non zero values. Will be of CSR format. + - sample_id : ndarray of shape (804414,), dtype=np.uint32, + Identification number of each sample, as ordered in dataset.data. + - target_names : ndarray of shape (103,), dtype=object + Names of each target (RCV1 topics), as ordered in dataset.target. + - DESCR : str + Description of the RCV1 dataset. + + (data, target) : tuple + A tuple consisting of `dataset.data` and `dataset.target`, as + described above. Returned only if `return_X_y` is True. + + .. 
versionadded:: 0.20 + """ + N_SAMPLES = 804414 + N_FEATURES = 47236 + N_CATEGORIES = 103 + N_TRAIN = 23149 + + data_home = get_data_home(data_home=data_home) + rcv1_dir = join(data_home, "RCV1") + if download_if_missing: + if not exists(rcv1_dir): + makedirs(rcv1_dir) + + samples_path = _pkl_filepath(rcv1_dir, "samples.pkl") + sample_id_path = _pkl_filepath(rcv1_dir, "sample_id.pkl") + sample_topics_path = _pkl_filepath(rcv1_dir, "sample_topics.pkl") + topics_path = _pkl_filepath(rcv1_dir, "topics_names.pkl") + + # load data (X) and sample_id + if download_if_missing and (not exists(samples_path) or not exists(sample_id_path)): + files = [] + for each in XY_METADATA: + logger.info("Downloading %s" % each.url) + file_path = _fetch_remote(each, dirname=rcv1_dir) + files.append(GzipFile(filename=file_path)) + + Xy = load_svmlight_files(files, n_features=N_FEATURES) + + # Training data is before testing data + X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr() + sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7])) + sample_id = sample_id.astype(np.uint32, copy=False) + + joblib.dump(X, samples_path, compress=9) + joblib.dump(sample_id, sample_id_path, compress=9) + + # delete archives + for f in files: + f.close() + remove(f.name) + else: + X = joblib.load(samples_path) + sample_id = joblib.load(sample_id_path) + + # load target (y), categories, and sample_id_bis + if download_if_missing and ( + not exists(sample_topics_path) or not exists(topics_path) + ): + logger.info("Downloading %s" % TOPICS_METADATA.url) + topics_archive_path = _fetch_remote(TOPICS_METADATA, dirname=rcv1_dir) + + # parse the target file + n_cat = -1 + n_doc = -1 + doc_previous = -1 + y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8) + sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32) + category_names = {} + with GzipFile(filename=topics_archive_path, mode="rb") as f: + for line in f: + line_components = line.decode("ascii").split(" ") + if len(line_components) == 3: + cat, doc, _ = line_components + if cat not in category_names: + n_cat += 1 + category_names[cat] = n_cat + + doc = int(doc) + if doc != doc_previous: + doc_previous = doc + n_doc += 1 + sample_id_bis[n_doc] = doc + y[n_doc, category_names[cat]] = 1 + + # delete archive + remove(topics_archive_path) + + # Samples in X are ordered with sample_id, + # whereas in y, they are ordered with sample_id_bis. + permutation = _find_permutation(sample_id_bis, sample_id) + y = y[permutation, :] + + # save category names in a list, with same order than y + categories = np.empty(N_CATEGORIES, dtype=object) + for k in category_names.keys(): + categories[category_names[k]] = k + + # reorder categories in lexicographic order + order = np.argsort(categories) + categories = categories[order] + y = sp.csr_matrix(y[:, order]) + + joblib.dump(y, sample_topics_path, compress=9) + joblib.dump(categories, topics_path, compress=9) + else: + y = joblib.load(sample_topics_path) + categories = joblib.load(topics_path) + + if subset == "all": + pass + elif subset == "train": + X = X[:N_TRAIN, :] + y = y[:N_TRAIN, :] + sample_id = sample_id[:N_TRAIN] + elif subset == "test": + X = X[N_TRAIN:, :] + y = y[N_TRAIN:, :] + sample_id = sample_id[N_TRAIN:] + else: + raise ValueError( + "Unknown subset parameter. 
Got '%s' instead of one" + " of ('all', 'train', test')" % subset + ) + + if shuffle: + X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state) + + fdescr = load_descr("rcv1.rst") + + if return_X_y: + return X, y + + return Bunch( + data=X, target=y, sample_id=sample_id, target_names=categories, DESCR=fdescr + ) + + +def _inverse_permutation(p): + """Inverse permutation p.""" + n = p.size + s = np.zeros(n, dtype=np.int32) + i = np.arange(n, dtype=np.int32) + np.put(s, p, i) # s[p] = i + return s + + +def _find_permutation(a, b): + """Find the permutation from a to b.""" + t = np.argsort(a) + u = np.argsort(b) + u_ = _inverse_permutation(u) + return t[u_] diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..1d1e65ff9966ef0f71521314f59772ca0dfd2283 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_samples_generator.py @@ -0,0 +1,2284 @@ +""" +Generate samples of synthetic data sets. +""" + +# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, +# G. Louppe, J. Nothman +# License: BSD 3 clause + +import array +import numbers +import warnings +from collections.abc import Iterable +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy import linalg + +from ..preprocessing import MultiLabelBinarizer +from ..utils import check_array, check_random_state +from ..utils import shuffle as util_shuffle +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.random import sample_without_replacement + + +def _generate_hypercube(samples, dimensions, rng): + """Returns distinct binary samples of length dimensions.""" + if dimensions > 30: + return np.hstack( + [ + rng.randint(2, size=(samples, dimensions - 30)), + _generate_hypercube(samples, 30, rng), + ] + ) + out = sample_without_replacement(2**dimensions, samples, random_state=rng).astype( + dtype=">u4", copy=False + ) + out = np.unpackbits(out.view(">u1")).reshape((-1, 32))[:, -dimensions:] + return out + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_informative": [Interval(Integral, 1, None, closed="left")], + "n_redundant": [Interval(Integral, 0, None, closed="left")], + "n_repeated": [Interval(Integral, 0, None, closed="left")], + "n_classes": [Interval(Integral, 1, None, closed="left")], + "n_clusters_per_class": [Interval(Integral, 1, None, closed="left")], + "weights": ["array-like", None], + "flip_y": [Interval(Real, 0, 1, closed="both")], + "class_sep": [Interval(Real, 0, None, closed="neither")], + "hypercube": ["boolean"], + "shift": [Interval(Real, None, None, closed="neither"), "array-like", None], + "scale": [Interval(Real, 0, None, closed="neither"), "array-like", None], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_classification( + n_samples=100, + n_features=20, + *, + n_informative=2, + n_redundant=2, + n_repeated=0, + n_classes=2, + n_clusters_per_class=2, + weights=None, + flip_y=0.01, + class_sep=1.0, + hypercube=True, + shift=0.0, + scale=1.0, + shuffle=True, + random_state=None, +): + """Generate a random n-class classification problem. 
+ + This initially creates clusters of points normally distributed (std=1) + about vertices of an ``n_informative``-dimensional hypercube with sides of + length ``2*class_sep`` and assigns an equal number of clusters to each + class. It introduces interdependence between these features and adds + various types of further noise to the data. + + Without shuffling, ``X`` horizontally stacks features in the following + order: the primary ``n_informative`` features, followed by ``n_redundant`` + linear combinations of the informative features, followed by ``n_repeated`` + duplicates, drawn randomly with replacement from the informative and + redundant features. The remaining features are filled with random noise. + Thus, without shuffling, all useful features are contained in the columns + ``X[:, :n_informative + n_redundant + n_repeated]``. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=20 + The total number of features. These comprise ``n_informative`` + informative features, ``n_redundant`` redundant features, + ``n_repeated`` duplicated features and + ``n_features-n_informative-n_redundant-n_repeated`` useless features + drawn at random. + + n_informative : int, default=2 + The number of informative features. Each class is composed of a number + of gaussian clusters each located around the vertices of a hypercube + in a subspace of dimension ``n_informative``. For each cluster, + informative features are drawn independently from N(0, 1) and then + randomly linearly combined within each cluster in order to add + covariance. The clusters are then placed on the vertices of the + hypercube. + + n_redundant : int, default=2 + The number of redundant features. These features are generated as + random linear combinations of the informative features. + + n_repeated : int, default=0 + The number of duplicated features, drawn randomly from the informative + and the redundant features. + + n_classes : int, default=2 + The number of classes (or labels) of the classification problem. + + n_clusters_per_class : int, default=2 + The number of clusters per class. + + weights : array-like of shape (n_classes,) or (n_classes - 1,),\ + default=None + The proportions of samples assigned to each class. If None, then + classes are balanced. Note that if ``len(weights) == n_classes - 1``, + then the last class weight is automatically inferred. + More than ``n_samples`` samples may be returned if the sum of + ``weights`` exceeds 1. Note that the actual class proportions will + not exactly match ``weights`` when ``flip_y`` isn't 0. + + flip_y : float, default=0.01 + The fraction of samples whose class is assigned randomly. Larger + values introduce noise in the labels and make the classification + task harder. Note that the default setting flip_y > 0 might lead + to less than ``n_classes`` in y in some cases. + + class_sep : float, default=1.0 + The factor multiplying the hypercube size. Larger values spread + out the clusters/classes and make the classification task easier. + + hypercube : bool, default=True + If True, the clusters are put on the vertices of a hypercube. If + False, the clusters are put on the vertices of a random polytope. + + shift : float, ndarray of shape (n_features,) or None, default=0.0 + Shift features by the specified value. 
If None, then features + are shifted by a random value drawn in [-class_sep, class_sep]. + + scale : float, ndarray of shape (n_features,) or None, default=1.0 + Multiply features by the specified value. If None, then features + are scaled by a random value drawn in [1, 100]. Note that scaling + happens after shifting. + + shuffle : bool, default=True + Shuffle the samples and the features. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels for class membership of each sample. + + See Also + -------- + make_blobs : Simplified variant. + make_multilabel_classification : Unrelated generator for multilabel tasks. + + Notes + ----- + The algorithm is adapted from Guyon [1] and was designed to generate + the "Madelon" dataset. + + References + ---------- + .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable + selection benchmark", 2003. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(random_state=42) + >>> X.shape + (100, 20) + >>> y.shape + (100,) + >>> list(y[:5]) + [0, 0, 1, 1, 0] + """ + generator = check_random_state(random_state) + + # Count features, clusters and samples + if n_informative + n_redundant + n_repeated > n_features: + raise ValueError( + "Number of informative, redundant and repeated " + "features must sum to less than the number of total" + " features" + ) + # Use log2 to avoid overflow errors + if n_informative < np.log2(n_classes * n_clusters_per_class): + msg = "n_classes({}) * n_clusters_per_class({}) must be" + msg += " smaller or equal 2**n_informative({})={}" + raise ValueError( + msg.format( + n_classes, n_clusters_per_class, n_informative, 2**n_informative + ) + ) + + if weights is not None: + if len(weights) not in [n_classes, n_classes - 1]: + raise ValueError( + "Weights specified but incompatible with number of classes." 
+ ) + if len(weights) == n_classes - 1: + if isinstance(weights, list): + weights = weights + [1.0 - sum(weights)] + else: + weights = np.resize(weights, n_classes) + weights[-1] = 1.0 - sum(weights[:-1]) + else: + weights = [1.0 / n_classes] * n_classes + + n_useless = n_features - n_informative - n_redundant - n_repeated + n_clusters = n_classes * n_clusters_per_class + + # Distribute samples among clusters by weight + n_samples_per_cluster = [ + int(n_samples * weights[k % n_classes] / n_clusters_per_class) + for k in range(n_clusters) + ] + + for i in range(n_samples - sum(n_samples_per_cluster)): + n_samples_per_cluster[i % n_clusters] += 1 + + # Initialize X and y + X = np.zeros((n_samples, n_features)) + y = np.zeros(n_samples, dtype=int) + + # Build the polytope whose vertices become cluster centroids + centroids = _generate_hypercube(n_clusters, n_informative, generator).astype( + float, copy=False + ) + centroids *= 2 * class_sep + centroids -= class_sep + if not hypercube: + centroids *= generator.uniform(size=(n_clusters, 1)) + centroids *= generator.uniform(size=(1, n_informative)) + + # Initially draw informative features from the standard normal + X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative)) + + # Create each cluster; a variant of make_blobs + stop = 0 + for k, centroid in enumerate(centroids): + start, stop = stop, stop + n_samples_per_cluster[k] + y[start:stop] = k % n_classes # assign labels + X_k = X[start:stop, :n_informative] # slice a view of the cluster + + A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1 + X_k[...] = np.dot(X_k, A) # introduce random covariance + + X_k += centroid # shift the cluster to a vertex + + # Create redundant features + if n_redundant > 0: + B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1 + X[:, n_informative : n_informative + n_redundant] = np.dot( + X[:, :n_informative], B + ) + + # Repeat some features + if n_repeated > 0: + n = n_informative + n_redundant + indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp) + X[:, n : n + n_repeated] = X[:, indices] + + # Fill useless features + if n_useless > 0: + X[:, -n_useless:] = generator.standard_normal(size=(n_samples, n_useless)) + + # Randomly replace labels + if flip_y >= 0.0: + flip_mask = generator.uniform(size=n_samples) < flip_y + y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) + + # Randomly shift and scale + if shift is None: + shift = (2 * generator.uniform(size=n_features) - 1) * class_sep + X += shift + + if scale is None: + scale = 1 + 100 * generator.uniform(size=n_features) + X *= scale + + if shuffle: + # Randomly permute samples + X, y = util_shuffle(X, y, random_state=generator) + + # Randomly permute features + indices = np.arange(n_features) + generator.shuffle(indices) + X[:, :] = X[:, indices] + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_classes": [Interval(Integral, 1, None, closed="left")], + "n_labels": [Interval(Integral, 0, None, closed="left")], + "length": [Interval(Integral, 1, None, closed="left")], + "allow_unlabeled": ["boolean"], + "sparse": ["boolean"], + "return_indicator": [StrOptions({"dense", "sparse"}), "boolean"], + "return_distributions": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_multilabel_classification( + n_samples=100, + n_features=20, + *, + 
n_classes=5, + n_labels=2, + length=50, + allow_unlabeled=True, + sparse=False, + return_indicator="dense", + return_distributions=False, + random_state=None, +): + """Generate a random multilabel classification problem. + + For each sample, the generative process is: + - pick the number of labels: n ~ Poisson(n_labels) + - n times, choose a class c: c ~ Multinomial(theta) + - pick the document length: k ~ Poisson(length) + - k times, choose a word: w ~ Multinomial(theta_c) + + In the above process, rejection sampling is used to make sure that + n is never zero or more than `n_classes`, and that the document length + is never zero. Likewise, we reject classes which have already been chosen. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=20 + The total number of features. + + n_classes : int, default=5 + The number of classes of the classification problem. + + n_labels : int, default=2 + The average number of labels per instance. More precisely, the number + of labels per sample is drawn from a Poisson distribution with + ``n_labels`` as its expected value, but samples are bounded (using + rejection sampling) by ``n_classes``, and must be nonzero if + ``allow_unlabeled`` is False. + + length : int, default=50 + The sum of the features (number of words if documents) is drawn from + a Poisson distribution with this expected value. + + allow_unlabeled : bool, default=True + If ``True``, some instances might not belong to any class. + + sparse : bool, default=False + If ``True``, return a sparse feature matrix. + + .. versionadded:: 0.17 + parameter to allow *sparse* output. + + return_indicator : {'dense', 'sparse'} or False, default='dense' + If ``'dense'`` return ``Y`` in the dense binary indicator format. If + ``'sparse'`` return ``Y`` in the sparse binary indicator format. + ``False`` returns a list of lists of labels. + + return_distributions : bool, default=False + If ``True``, return the prior class probability and conditional + probabilities of features given classes, from which the data was + drawn. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) + The label sets. Sparse matrix should be of CSR format. + + p_c : ndarray of shape (n_classes,) + The probability of each class being drawn. Only returned if + ``return_distributions=True``. + + p_w_c : ndarray of shape (n_features, n_classes) + The probability of each feature being drawn given each class. + Only returned if ``return_distributions=True``. 
+ + Examples + -------- + >>> from sklearn.datasets import make_multilabel_classification + >>> X, y = make_multilabel_classification(n_labels=3, random_state=42) + >>> X.shape + (100, 20) + >>> y.shape + (100, 5) + >>> list(y[:3]) + [array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])] + """ + + generator = check_random_state(random_state) + p_c = generator.uniform(size=n_classes) + p_c /= p_c.sum() + cumulative_p_c = np.cumsum(p_c) + p_w_c = generator.uniform(size=(n_features, n_classes)) + p_w_c /= np.sum(p_w_c, axis=0) + + def sample_example(): + _, n_classes = p_w_c.shape + + # pick a nonzero number of labels per document by rejection sampling + y_size = n_classes + 1 + while (not allow_unlabeled and y_size == 0) or y_size > n_classes: + y_size = generator.poisson(n_labels) + + # pick n classes + y = set() + while len(y) != y_size: + # pick a class with probability P(c) + c = np.searchsorted(cumulative_p_c, generator.uniform(size=y_size - len(y))) + y.update(c) + y = list(y) + + # pick a non-zero document length by rejection sampling + n_words = 0 + while n_words == 0: + n_words = generator.poisson(length) + + # generate a document of length n_words + if len(y) == 0: + # if sample does not belong to any class, generate noise word + words = generator.randint(n_features, size=n_words) + return words, y + + # sample words with replacement from selected classes + cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum() + cumulative_p_w_sample /= cumulative_p_w_sample[-1] + words = np.searchsorted(cumulative_p_w_sample, generator.uniform(size=n_words)) + return words, y + + X_indices = array.array("i") + X_indptr = array.array("i", [0]) + Y = [] + for i in range(n_samples): + words, y = sample_example() + X_indices.extend(words) + X_indptr.append(len(X_indices)) + Y.append(y) + X_data = np.ones(len(X_indices), dtype=np.float64) + X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features)) + X.sum_duplicates() + if not sparse: + X = X.toarray() + + # return_indicator can be True due to backward compatibility + if return_indicator in (True, "sparse", "dense"): + lb = MultiLabelBinarizer(sparse_output=(return_indicator == "sparse")) + Y = lb.fit([range(n_classes)]).transform(Y) + if return_distributions: + return X, Y, p_c, p_w_c + return X, Y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_hastie_10_2(n_samples=12000, *, random_state=None): + """Generate data for binary classification used in Hastie et al. 2009, Example 10.2. + + The ten features are standard independent Gaussian and + the target ``y`` is defined by:: + + y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=12000 + The number of samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 10) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + See Also + -------- + make_gaussian_quantiles : A generalization of this dataset approach. + + References + ---------- + .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical + Learning Ed. 2", Springer, 2009. 
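+
+    Examples
+    --------
+    A minimal usage sketch added for illustration; only the array shapes are
+    shown, since the drawn values depend on the pseudo-random generator. The
+    target takes values in ``{-1, 1}``.
+
+    >>> from sklearn.datasets import make_hastie_10_2
+    >>> X, y = make_hastie_10_2(random_state=0)
+    >>> X.shape
+    (12000, 10)
+    >>> y.shape
+    (12000,)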
+ """ + rs = check_random_state(random_state) + + shape = (n_samples, 10) + X = rs.normal(size=shape).reshape(shape) + y = ((X**2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False) + y[y == 0.0] = -1.0 + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_informative": [Interval(Integral, 0, None, closed="left")], + "n_targets": [Interval(Integral, 1, None, closed="left")], + "bias": [Interval(Real, None, None, closed="neither")], + "effective_rank": [Interval(Integral, 1, None, closed="left"), None], + "tail_strength": [Interval(Real, 0, 1, closed="both")], + "noise": [Interval(Real, 0, None, closed="left")], + "shuffle": ["boolean"], + "coef": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_regression( + n_samples=100, + n_features=100, + *, + n_informative=10, + n_targets=1, + bias=0.0, + effective_rank=None, + tail_strength=0.5, + noise=0.0, + shuffle=True, + coef=False, + random_state=None, +): + """Generate a random regression problem. + + The input set can either be well conditioned (by default) or have a low + rank-fat tail singular profile. See :func:`make_low_rank_matrix` for + more details. + + The output is generated by applying a (potentially biased) random linear + regression model with `n_informative` nonzero regressors to the previously + generated input and some gaussian centered noise with some adjustable + scale. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=100 + The number of features. + + n_informative : int, default=10 + The number of informative features, i.e., the number of features used + to build the linear model used to generate the output. + + n_targets : int, default=1 + The number of regression targets, i.e., the dimension of the y output + vector associated with a sample. By default, the output is a scalar. + + bias : float, default=0.0 + The bias term in the underlying linear model. + + effective_rank : int, default=None + If not None: + The approximate number of singular vectors required to explain most + of the input data by linear combinations. Using this kind of + singular spectrum in the input allows the generator to reproduce + the correlations often observed in practice. + If None: + The input set is well conditioned, centered and gaussian with + unit variance. + + tail_strength : float, default=0.5 + The relative importance of the fat noisy tail of the singular values + profile if `effective_rank` is not None. When a float, it should be + between 0 and 1. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + shuffle : bool, default=True + Shuffle the samples and the features. + + coef : bool, default=False + If True, the coefficients of the underlying linear model are returned. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The input samples. + + y : ndarray of shape (n_samples,) or (n_samples, n_targets) + The output values. + + coef : ndarray of shape (n_features,) or (n_features, n_targets) + The coefficient of the underlying linear model. 
It is returned only if + coef is True. + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42) + >>> X + array([[ 0.4967..., -0.1382... ], + [ 0.6476..., 1.523...], + [-0.2341..., -0.2341...], + [-0.4694..., 0.5425...], + [ 1.579..., 0.7674...]]) + >>> y + array([ 6.737..., 37.79..., -10.27..., 0.4017..., 42.22...]) + """ + n_informative = min(n_features, n_informative) + generator = check_random_state(random_state) + + if effective_rank is None: + # Randomly generate a well conditioned input set + X = generator.standard_normal(size=(n_samples, n_features)) + + else: + # Randomly generate a low rank, fat tail input set + X = make_low_rank_matrix( + n_samples=n_samples, + n_features=n_features, + effective_rank=effective_rank, + tail_strength=tail_strength, + random_state=generator, + ) + + # Generate a ground truth model with only n_informative features being non + # zeros (the other features are not correlated to y and should be ignored + # by a sparsifying regularizers such as L1 or elastic net) + ground_truth = np.zeros((n_features, n_targets)) + ground_truth[:n_informative, :] = 100 * generator.uniform( + size=(n_informative, n_targets) + ) + + y = np.dot(X, ground_truth) + bias + + # Add noise + if noise > 0.0: + y += generator.normal(scale=noise, size=y.shape) + + # Randomly permute samples and features + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + indices = np.arange(n_features) + generator.shuffle(indices) + X[:, :] = X[:, indices] + ground_truth = ground_truth[indices] + + y = np.squeeze(y) + + if coef: + return X, y, np.squeeze(ground_truth) + + else: + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 0, None, closed="left"), tuple], + "shuffle": ["boolean"], + "noise": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + "factor": [Interval(Real, 0, 1, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def make_circles( + n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8 +): + """Make a large circle containing a smaller circle in 2d. + + A simple toy dataset to visualize clustering and classification + algorithms. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or tuple of shape (2,), dtype=int, default=100 + If int, it is the total number of points generated. + For odd numbers, the inner circle will have one point more than the + outer circle. + If two-element tuple, number of points in outer circle and inner + circle. + + .. versionchanged:: 0.23 + Added two-element tuple. + + shuffle : bool, default=True + Whether to shuffle the samples. + + noise : float, default=None + Standard deviation of Gaussian noise added to the data. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling and noise. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + factor : float, default=.8 + Scale factor between inner and outer circle in the range `[0, 1)`. + + Returns + ------- + X : ndarray of shape (n_samples, 2) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels (0 or 1) for class membership of each sample. 
+ + Examples + -------- + >>> from sklearn.datasets import make_circles + >>> X, y = make_circles(random_state=42) + >>> X.shape + (100, 2) + >>> y.shape + (100,) + >>> list(y[:5]) + [1, 1, 1, 0, 0] + """ + if isinstance(n_samples, numbers.Integral): + n_samples_out = n_samples // 2 + n_samples_in = n_samples - n_samples_out + else: # n_samples is a tuple + if len(n_samples) != 2: + raise ValueError("When a tuple, n_samples must have exactly two elements.") + n_samples_out, n_samples_in = n_samples + + generator = check_random_state(random_state) + # so as not to have the first point = last point, we set endpoint=False + linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False) + linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False) + outer_circ_x = np.cos(linspace_out) + outer_circ_y = np.sin(linspace_out) + inner_circ_x = np.cos(linspace_in) * factor + inner_circ_y = np.sin(linspace_in) * factor + + X = np.vstack( + [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)] + ).T + y = np.hstack( + [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)] + ) + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + if noise is not None: + X += generator.normal(scale=noise, size=X.shape) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left"), tuple], + "shuffle": ["boolean"], + "noise": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None): + """Make two interleaving half circles. + + A simple toy dataset to visualize clustering and classification + algorithms. Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or tuple of shape (2,), dtype=int, default=100 + If int, the total number of points generated. + If two-element tuple, number of points in each of two moons. + + .. versionchanged:: 0.23 + Added two-element tuple. + + shuffle : bool, default=True + Whether to shuffle the samples. + + noise : float, default=None + Standard deviation of Gaussian noise added to the data. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset shuffling and noise. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 2) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels (0 or 1) for class membership of each sample. + """ + + if isinstance(n_samples, numbers.Integral): + n_samples_out = n_samples // 2 + n_samples_in = n_samples - n_samples_out + else: + try: + n_samples_out, n_samples_in = n_samples + except ValueError as e: + raise ValueError( + "`n_samples` can be either an int or a two-element tuple." 
+ ) from e + + generator = check_random_state(random_state) + + outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out)) + outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out)) + inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in)) + inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5 + + X = np.vstack( + [np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)] + ).T + y = np.hstack( + [np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)] + ) + + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + if noise is not None: + X += generator.normal(scale=noise, size=X.shape) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left"), "array-like"], + "n_features": [Interval(Integral, 1, None, closed="left")], + "centers": [Interval(Integral, 1, None, closed="left"), "array-like", None], + "cluster_std": [Interval(Real, 0, None, closed="left"), "array-like"], + "center_box": [tuple], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "return_centers": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def make_blobs( + n_samples=100, + n_features=2, + *, + centers=None, + cluster_std=1.0, + center_box=(-10.0, 10.0), + shuffle=True, + random_state=None, + return_centers=False, +): + """Generate isotropic Gaussian blobs for clustering. + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or array-like, default=100 + If int, it is the total number of points equally divided among + clusters. + If array-like, each element of the sequence indicates + the number of samples per cluster. + + .. versionchanged:: v0.20 + one can now pass an array-like to the ``n_samples`` parameter + + n_features : int, default=2 + The number of features for each sample. + + centers : int or array-like of shape (n_centers, n_features), default=None + The number of centers to generate, or the fixed center locations. + If n_samples is an int and centers is None, 3 centers are generated. + If n_samples is array-like, centers must be + either None or an array of length equal to the length of n_samples. + + cluster_std : float or array-like of float, default=1.0 + The standard deviation of the clusters. + + center_box : tuple of float (min, max), default=(-10.0, 10.0) + The bounding box for each cluster center when centers are + generated at random. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + return_centers : bool, default=False + If True, then return the centers of each cluster. + + .. versionadded:: 0.23 + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels for cluster membership of each sample. + + centers : ndarray of shape (n_centers, n_features) + The centers of each cluster. Only returned if + ``return_centers=True``. + + See Also + -------- + make_classification : A more intricate variant. + + Examples + -------- + >>> from sklearn.datasets import make_blobs + >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, + ... 
random_state=0) + >>> print(X.shape) + (10, 2) + >>> y + array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) + >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2, + ... random_state=0) + >>> print(X.shape) + (10, 2) + >>> y + array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0]) + """ + generator = check_random_state(random_state) + + if isinstance(n_samples, numbers.Integral): + # Set n_centers by looking at centers arg + if centers is None: + centers = 3 + + if isinstance(centers, numbers.Integral): + n_centers = centers + centers = generator.uniform( + center_box[0], center_box[1], size=(n_centers, n_features) + ) + + else: + centers = check_array(centers) + n_features = centers.shape[1] + n_centers = centers.shape[0] + + else: + # Set n_centers by looking at [n_samples] arg + n_centers = len(n_samples) + if centers is None: + centers = generator.uniform( + center_box[0], center_box[1], size=(n_centers, n_features) + ) + if not isinstance(centers, Iterable): + raise ValueError( + "Parameter `centers` must be array-like. Got {!r} instead".format( + centers + ) + ) + if len(centers) != n_centers: + raise ValueError( + "Length of `n_samples` not consistent with number of " + f"centers. Got n_samples = {n_samples} and centers = {centers}" + ) + centers = check_array(centers) + n_features = centers.shape[1] + + # stds: if cluster_std is given as list, it must be consistent + # with the n_centers + if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers: + raise ValueError( + "Length of `clusters_std` not consistent with " + "number of centers. Got centers = {} " + "and cluster_std = {}".format(centers, cluster_std) + ) + + if isinstance(cluster_std, numbers.Real): + cluster_std = np.full(len(centers), cluster_std) + + if isinstance(n_samples, Iterable): + n_samples_per_center = n_samples + else: + n_samples_per_center = [int(n_samples // n_centers)] * n_centers + + for i in range(n_samples % n_centers): + n_samples_per_center[i] += 1 + + cum_sum_n_samples = np.cumsum(n_samples_per_center) + X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64) + y = np.empty(shape=(sum(n_samples_per_center),), dtype=int) + + for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)): + start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0 + end_idx = cum_sum_n_samples[i] + X[start_idx:end_idx] = generator.normal( + loc=centers[i], scale=std, size=(n, n_features) + ) + y[start_idx:end_idx] = i + + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + if return_centers: + return X, y, centers + else: + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 5, None, closed="left")], + "noise": [Interval(Real, 0.0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None): + """Generate the "Friedman #1" regression problem. + + This dataset is described in Friedman [1] and Breiman [2]. + + Inputs `X` are independent features uniformly distributed on the interval + [0, 1]. The output `y` is created according to the formula:: + + y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ ++ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1). + + Out of the `n_features` features, only 5 are actually used to compute + `y`. The remaining features are independent of `y`. + + The number of features has to be >= 5. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=10 + The number of features. Should be at least 5. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset noise. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals + of Statistics 19 (1), pages 1-67, 1991. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, + pages 123-140, 1996. + + Examples + -------- + >>> from sklearn.datasets import make_friedman1 + >>> X, y = make_friedman1(random_state=42) + >>> X.shape + (100, 10) + >>> y.shape + (100,) + >>> list(y[:3]) + [16.8..., 5.8..., 9.4...] + """ + generator = check_random_state(random_state) + + X = generator.uniform(size=(n_samples, n_features)) + y = ( + 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + + 20 * (X[:, 2] - 0.5) ** 2 + + 10 * X[:, 3] + + 5 * X[:, 4] + + noise * generator.standard_normal(size=(n_samples)) + ) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_friedman2(n_samples=100, *, noise=0.0, random_state=None): + """Generate the "Friedman #2" regression problem. + + This dataset is described in Friedman [1] and Breiman [2]. + + Inputs `X` are 4 independent features uniformly distributed on the + intervals:: + + 0 <= X[:, 0] <= 100, + 40 * pi <= X[:, 1] <= 560 * pi, + 0 <= X[:, 2] <= 1, + 1 <= X[:, 3] <= 11. + + The output `y` is created according to the formula:: + + y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ + - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset noise. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 4) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals + of Statistics 19 (1), pages 1-67, 1991. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, + pages 123-140, 1996. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> X, y = make_friedman2(random_state=42) + >>> X.shape + (100, 4) + >>> y.shape + (100,) + >>> list(y[:3]) + [1229.4..., 27.0..., 65.6...] 
+ """ + generator = check_random_state(random_state) + + X = generator.uniform(size=(n_samples, 4)) + X[:, 0] *= 100 + X[:, 1] *= 520 * np.pi + X[:, 1] += 40 * np.pi + X[:, 3] *= 10 + X[:, 3] += 1 + + y = ( + X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2 + ) ** 0.5 + noise * generator.standard_normal(size=(n_samples)) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_friedman3(n_samples=100, *, noise=0.0, random_state=None): + """Generate the "Friedman #3" regression problem. + + This dataset is described in Friedman [1] and Breiman [2]. + + Inputs `X` are 4 independent features uniformly distributed on the + intervals:: + + 0 <= X[:, 0] <= 100, + 40 * pi <= X[:, 1] <= 560 * pi, + 0 <= X[:, 2] <= 1, + 1 <= X[:, 3] <= 11. + + The output `y` is created according to the formula:: + + y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \ +/ X[:, 0]) + noise * N(0, 1). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + noise : float, default=0.0 + The standard deviation of the gaussian noise applied to the output. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset noise. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 4) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals + of Statistics 19 (1), pages 1-67, 1991. + + .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, + pages 123-140, 1996. + + Examples + -------- + >>> from sklearn.datasets import make_friedman3 + >>> X, y = make_friedman3(random_state=42) + >>> X.shape + (100, 4) + >>> y.shape + (100,) + >>> list(y[:3]) + [1.5..., 0.9..., 0.4...] + """ + generator = check_random_state(random_state) + + X = generator.uniform(size=(n_samples, 4)) + X[:, 0] *= 100 + X[:, 1] *= 520 * np.pi + X[:, 1] += 40 * np.pi + X[:, 3] *= 10 + X[:, 3] += 1 + + y = np.arctan( + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0] + ) + noise * generator.standard_normal(size=(n_samples)) + + return X, y + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "effective_rank": [Interval(Integral, 1, None, closed="left")], + "tail_strength": [Interval(Real, 0, 1, closed="both")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_low_rank_matrix( + n_samples=100, + n_features=100, + *, + effective_rank=10, + tail_strength=0.5, + random_state=None, +): + """Generate a mostly low rank matrix with bell-shaped singular values. + + Most of the variance can be explained by a bell-shaped curve of width + effective_rank: the low rank part of the singular values profile is:: + + (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) + + The remaining singular values' tail is fat, decreasing as:: + + tail_strength * exp(-0.1 * i / effective_rank). 
+ + The low rank part of the profile can be considered the structured + signal part of the data while the tail can be considered the noisy + part of the data that cannot be summarized by a low number of linear + components (singular vectors). + + This kind of singular profiles is often seen in practice, for instance: + - gray level pictures of faces + - TF-IDF vectors of text documents crawled from the web + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=100 + The number of features. + + effective_rank : int, default=10 + The approximate number of singular vectors required to explain most of + the data by linear combinations. + + tail_strength : float, default=0.5 + The relative importance of the fat noisy tail of the singular values + profile. The value should be between 0 and 1. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The matrix. + """ + generator = check_random_state(random_state) + n = min(n_samples, n_features) + + # Random (ortho normal) vectors + u, _ = linalg.qr( + generator.standard_normal(size=(n_samples, n)), + mode="economic", + check_finite=False, + ) + v, _ = linalg.qr( + generator.standard_normal(size=(n_features, n)), + mode="economic", + check_finite=False, + ) + + # Index of the singular values + singular_ind = np.arange(n, dtype=np.float64) + + # Build the singular profile by assembling signal and noise components + low_rank = (1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2) + tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank) + s = np.identity(n) * (low_rank + tail) + + return np.dot(np.dot(u, s), v.T) + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "data_transposed": ["boolean", Hidden(StrOptions({"deprecated"}))], + }, + prefer_skip_nested_validation=True, +) +def make_sparse_coded_signal( + n_samples, + *, + n_components, + n_features, + n_nonzero_coefs, + random_state=None, + data_transposed="deprecated", +): + """Generate a signal as a sparse combination of dictionary elements. + + Returns a matrix `Y = DX`, such that `D` is of shape `(n_features, n_components)`, + `X` is of shape `(n_components, n_samples)` and each column of `X` has exactly + `n_nonzero_coefs` non-zero elements. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int + Number of samples to generate. + + n_components : int + Number of components in the dictionary. + + n_features : int + Number of features of the dataset to generate. + + n_nonzero_coefs : int + Number of active (non-zero) coefficients in each sample. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + data_transposed : bool, default=False + By default, Y, D and X are not transposed. + + .. versionadded:: 1.1 + + .. 
versionchanged:: 1.3 + Default value changed from True to False. + + .. deprecated:: 1.3 + `data_transposed` is deprecated and will be removed in 1.5. + + Returns + ------- + data : ndarray of shape (n_features, n_samples) or (n_samples, n_features) + The encoded signal (Y). The shape is `(n_samples, n_features)` if + `data_transposed` is False, otherwise it's `(n_features, n_samples)`. + + dictionary : ndarray of shape (n_features, n_components) or \ + (n_components, n_features) + The dictionary with normalized components (D). The shape is + `(n_components, n_features)` if `data_transposed` is False, otherwise it's + `(n_features, n_components)`. + + code : ndarray of shape (n_components, n_samples) or (n_samples, n_components) + The sparse code such that each column of this matrix has exactly + n_nonzero_coefs non-zero items (X). The shape is `(n_samples, n_components)` + if `data_transposed` is False, otherwise it's `(n_components, n_samples)`. + """ + generator = check_random_state(random_state) + + # generate dictionary + D = generator.standard_normal(size=(n_features, n_components)) + D /= np.sqrt(np.sum((D**2), axis=0)) + + # generate code + X = np.zeros((n_components, n_samples)) + for i in range(n_samples): + idx = np.arange(n_components) + generator.shuffle(idx) + idx = idx[:n_nonzero_coefs] + X[idx, i] = generator.standard_normal(size=n_nonzero_coefs) + + # encode signal + Y = np.dot(D, X) + + # TODO(1.5) remove data_transposed + # raise warning if data_transposed is not passed explicitly + if data_transposed != "deprecated": + warnings.warn( + "data_transposed was deprecated in version 1.3 and will be removed in 1.5.", + FutureWarning, + ) + else: + data_transposed = False + + # transpose if needed + if not data_transposed: + Y, D, X = Y.T, D.T, X.T + + return map(np.squeeze, (Y, D, X)) + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_sparse_uncorrelated(n_samples=100, n_features=10, *, random_state=None): + """Generate a random regression problem with sparse uncorrelated design. + + This dataset is described in Celeux et al [1]. as:: + + X ~ N(0, 1) + y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3] + + Only the first 4 features are informative. The remaining features are + useless. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of samples. + + n_features : int, default=10 + The number of features. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The input samples. + + y : ndarray of shape (n_samples,) + The output values. + + References + ---------- + .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert, + "Regularization in regression: comparing Bayesian and frequentist + methods in a poorly informative situation", 2009. 
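+
+    Examples
+    --------
+    A minimal usage sketch added for illustration; only the array shapes are
+    shown, since the drawn values depend on the pseudo-random generator.
+
+    >>> from sklearn.datasets import make_sparse_uncorrelated
+    >>> X, y = make_sparse_uncorrelated(n_samples=50, n_features=10, random_state=0)
+    >>> X.shape
+    (50, 10)
+    >>> y.shape
+    (50,)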
+ """ + generator = check_random_state(random_state) + + X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) + y = generator.normal( + loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]), + scale=np.ones(n_samples), + ) + + return X, y + + +@validate_params( + { + "n_dim": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_spd_matrix(n_dim, *, random_state=None): + """Generate a random symmetric, positive-definite matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_dim : int + The matrix dimension. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_dim, n_dim) + The random symmetric, positive-definite matrix. + + See Also + -------- + make_sparse_spd_matrix: Generate a sparse symmetric definite positive matrix. + + Examples + -------- + >>> from sklearn.datasets import make_spd_matrix + >>> make_spd_matrix(n_dim=2, random_state=42) + array([[2.09..., 0.34...], + [0.34..., 0.21...]]) + """ + generator = check_random_state(random_state) + + A = generator.uniform(size=(n_dim, n_dim)) + U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False) + X = np.dot(np.dot(U, 1.0 + np.diag(generator.uniform(size=n_dim))), Vt) + + return X + + +@validate_params( + { + "n_dim": [Hidden(None), Interval(Integral, 1, None, closed="left")], + "alpha": [Interval(Real, 0, 1, closed="both")], + "norm_diag": ["boolean"], + "smallest_coef": [Interval(Real, 0, 1, closed="both")], + "largest_coef": [Interval(Real, 0, 1, closed="both")], + "sparse_format": [ + StrOptions({"bsr", "coo", "csc", "csr", "dia", "dok", "lil"}), + None, + ], + "random_state": ["random_state"], + "dim": [ + Interval(Integral, 1, None, closed="left"), + Hidden(StrOptions({"deprecated"})), + ], + }, + prefer_skip_nested_validation=True, +) +def make_sparse_spd_matrix( + n_dim=None, + *, + alpha=0.95, + norm_diag=False, + smallest_coef=0.1, + largest_coef=0.9, + sparse_format=None, + random_state=None, + dim="deprecated", +): + """Generate a sparse symmetric definite positive matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_dim : int, default=1 + The size of the random matrix to generate. + + .. versionchanged:: 1.4 + Renamed from ``dim`` to ``n_dim``. + + alpha : float, default=0.95 + The probability that a coefficient is zero (see notes). Larger values + enforce more sparsity. The value should be in the range 0 and 1. + + norm_diag : bool, default=False + Whether to normalize the output matrix to make the leading diagonal + elements all 1. + + smallest_coef : float, default=0.1 + The value of the smallest coefficient between 0 and 1. + + largest_coef : float, default=0.9 + The value of the largest coefficient between 0 and 1. + + sparse_format : str, default=None + String representing the output sparse format, such as 'csc', 'csr', etc. + If ``None``, return a dense numpy ndarray. + + .. versionadded:: 1.4 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + dim : int, default=1 + The size of the random matrix to generate. + + .. 
deprecated:: 1.4 + `dim` is deprecated and will be removed in 1.6. + + Returns + ------- + prec : ndarray or sparse matrix of shape (dim, dim) + The generated matrix. If ``sparse_format=None``, this would be an ndarray. + Otherwise, this will be a sparse matrix of the specified format. + + See Also + -------- + make_spd_matrix : Generate a random symmetric, positive-definite matrix. + + Notes + ----- + The sparsity is actually imposed on the cholesky factor of the matrix. + Thus alpha does not translate directly into the filling fraction of + the matrix itself. + + Examples + -------- + >>> from sklearn.datasets import make_sparse_spd_matrix + >>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42) + array([[1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 1., 0.], + [0., 0., 0., 1.]]) + """ + random_state = check_random_state(random_state) + + # TODO(1.6): remove in 1.6 + # Also make sure to change `n_dim` default back to 1 and deprecate None + if n_dim is not None and dim != "deprecated": + raise ValueError( + "`dim` and `n_dim` cannot be both specified. Please use `n_dim` only " + "as `dim` is deprecated in v1.4 and will be removed in v1.6." + ) + + if dim != "deprecated": + warnings.warn( + ( + "dim was deprecated in version 1.4 and will be removed in 1.6." + "Please use ``n_dim`` instead." + ), + FutureWarning, + ) + _n_dim = dim + elif n_dim is None: + _n_dim = 1 + else: + _n_dim = n_dim + + chol = -sp.eye(_n_dim) + aux = sp.random( + m=_n_dim, + n=_n_dim, + density=1 - alpha, + data_rvs=lambda x: random_state.uniform( + low=smallest_coef, high=largest_coef, size=x + ), + random_state=random_state, + ) + # We need to avoid "coo" format because it does not support slicing + aux = sp.tril(aux, k=-1, format="csc") + + # Permute the lines: we don't want to have asymmetries in the final + # SPD matrix + permutation = random_state.permutation(_n_dim) + aux = aux[permutation].T[permutation] + chol += aux + prec = chol.T @ chol + + if norm_diag: + # Form the diagonal vector into a row matrix + d = sp.diags(1.0 / np.sqrt(prec.diagonal())) + prec = d @ prec @ d + + if sparse_format is None: + return prec.toarray() + else: + return prec.asformat(sparse_format) + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + "hole": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None, hole=False): + """Generate a swiss roll dataset. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of sample points on the Swiss Roll. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + hole : bool, default=False + If True generates the swiss roll with hole dataset. + + Returns + ------- + X : ndarray of shape (n_samples, 3) + The points. + + t : ndarray of shape (n_samples,) + The univariate position of the sample according to the main dimension + of the points in the manifold. + + Notes + ----- + The algorithm is from Marsland [1]. + + References + ---------- + .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", 2nd edition, + Chapter 6, 2014. 
+ https://homepages.ecs.vuw.ac.nz/~marslast/Code/Ch6/lle.py + """ + generator = check_random_state(random_state) + + if not hole: + t = 1.5 * np.pi * (1 + 2 * generator.uniform(size=n_samples)) + y = 21 * generator.uniform(size=n_samples) + else: + corners = np.array( + [[np.pi * (1.5 + i), j * 7] for i in range(3) for j in range(3)] + ) + corners = np.delete(corners, 4, axis=0) + corner_index = generator.choice(8, n_samples) + parameters = generator.uniform(size=(2, n_samples)) * np.array([[np.pi], [7]]) + t, y = corners[corner_index].T + parameters + + x = t * np.cos(t) + z = t * np.sin(t) + + X = np.vstack((x, y, z)) + X += noise * generator.standard_normal(size=(3, n_samples)) + X = X.T + t = np.squeeze(t) + + return X, t + + +@validate_params( + { + "n_samples": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_s_curve(n_samples=100, *, noise=0.0, random_state=None): + """Generate an S curve dataset. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int, default=100 + The number of sample points on the S curve. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, 3) + The points. + + t : ndarray of shape (n_samples,) + The univariate position of the sample according to the main dimension + of the points in the manifold. + """ + generator = check_random_state(random_state) + + t = 3 * np.pi * (generator.uniform(size=(1, n_samples)) - 0.5) + X = np.empty(shape=(n_samples, 3), dtype=np.float64) + X[:, 0] = np.sin(t) + X[:, 1] = 2.0 * generator.uniform(size=n_samples) + X[:, 2] = np.sign(t) * (np.cos(t) - 1) + X += noise * generator.standard_normal(size=(3, n_samples)).T + t = np.squeeze(t) + + return X, t + + +@validate_params( + { + "mean": ["array-like", None], + "cov": [Interval(Real, 0, None, closed="left")], + "n_samples": [Interval(Integral, 1, None, closed="left")], + "n_features": [Interval(Integral, 1, None, closed="left")], + "n_classes": [Interval(Integral, 1, None, closed="left")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_gaussian_quantiles( + *, + mean=None, + cov=1.0, + n_samples=100, + n_features=2, + n_classes=3, + shuffle=True, + random_state=None, +): + r"""Generate isotropic Gaussian and label samples by quantile. + + This classification dataset is constructed by taking a multi-dimensional + standard normal distribution and defining classes separated by nested + concentric multi-dimensional spheres such that roughly equal numbers of + samples are in each class (quantiles of the :math:`\chi^2` distribution). + + For an example of usage, see + :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + mean : array-like of shape (n_features,), default=None + The mean of the multi-dimensional normal distribution. + If None then use the origin (0, 0, ...). + + cov : float, default=1.0 + The covariance matrix will be this value times the unit matrix. This + dataset only produces symmetric normal distributions. 
+ + n_samples : int, default=100 + The total number of points equally divided among classes. + + n_features : int, default=2 + The number of features for each sample. + + n_classes : int, default=3 + The number of classes. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape (n_samples, n_features) + The generated samples. + + y : ndarray of shape (n_samples,) + The integer labels for quantile membership of each sample. + + Notes + ----- + The dataset is from Zhu et al [1]. + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. + + Examples + -------- + >>> from sklearn.datasets import make_gaussian_quantiles + >>> X, y = make_gaussian_quantiles(random_state=42) + >>> X.shape + (100, 2) + >>> y.shape + (100,) + >>> list(y[:5]) + [2, 0, 1, 0, 2] + """ + if n_samples < n_classes: + raise ValueError("n_samples must be at least n_classes") + + generator = check_random_state(random_state) + + if mean is None: + mean = np.zeros(n_features) + else: + mean = np.array(mean) + + # Build multivariate normal distribution + X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,)) + + # Sort by distance from origin + idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1)) + X = X[idx, :] + + # Label by quantile + step = n_samples // n_classes + + y = np.hstack( + [ + np.repeat(np.arange(n_classes), step), + np.repeat(n_classes - 1, n_samples - step * n_classes), + ] + ) + + if shuffle: + X, y = util_shuffle(X, y, random_state=generator) + + return X, y + + +def _shuffle(data, random_state=None): + generator = check_random_state(random_state) + n_rows, n_cols = data.shape + row_idx = generator.permutation(n_rows) + col_idx = generator.permutation(n_cols) + result = data[row_idx][:, col_idx] + return result, row_idx, col_idx + + +@validate_params( + { + "shape": [tuple], + "n_clusters": [Interval(Integral, 1, None, closed="left")], + "noise": [Interval(Real, 0, None, closed="left")], + "minval": [Interval(Real, None, None, closed="neither")], + "maxval": [Interval(Real, None, None, closed="neither")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_biclusters( + shape, + n_clusters, + *, + noise=0.0, + minval=10, + maxval=100, + shuffle=True, + random_state=None, +): + """Generate a constant block diagonal structure array for biclustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + shape : tuple of shape (n_rows, n_cols) + The shape of the result. + + n_clusters : int + The number of biclusters. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + minval : float, default=10 + Minimum value of a bicluster. + + maxval : float, default=100 + Maximum value of a bicluster. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape `shape` + The generated array. + + rows : ndarray of shape (n_clusters, X.shape[0]) + The indicators for cluster membership of each row. 
+ + cols : ndarray of shape (n_clusters, X.shape[1]) + The indicators for cluster membership of each column. + + See Also + -------- + make_checkerboard: Generate an array with block checkerboard structure for + biclustering. + + References + ---------- + + .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and + words using bipartite spectral graph partitioning. In Proceedings + of the seventh ACM SIGKDD international conference on Knowledge + discovery and data mining (pp. 269-274). ACM. + """ + generator = check_random_state(random_state) + n_rows, n_cols = shape + consts = generator.uniform(minval, maxval, n_clusters) + + # row and column clusters of approximately equal sizes + row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters)) + col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters)) + + row_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes)] + ) + col_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes)] + ) + + result = np.zeros(shape, dtype=np.float64) + for i in range(n_clusters): + selector = np.outer(row_labels == i, col_labels == i) + result[selector] += consts[i] + + if noise > 0: + result += generator.normal(scale=noise, size=result.shape) + + if shuffle: + result, row_idx, col_idx = _shuffle(result, random_state) + row_labels = row_labels[row_idx] + col_labels = col_labels[col_idx] + + rows = np.vstack([row_labels == c for c in range(n_clusters)]) + cols = np.vstack([col_labels == c for c in range(n_clusters)]) + + return result, rows, cols + + +@validate_params( + { + "shape": [tuple], + "n_clusters": [Interval(Integral, 1, None, closed="left"), "array-like"], + "noise": [Interval(Real, 0, None, closed="left")], + "minval": [Interval(Real, None, None, closed="neither")], + "maxval": [Interval(Real, None, None, closed="neither")], + "shuffle": ["boolean"], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def make_checkerboard( + shape, + n_clusters, + *, + noise=0.0, + minval=10, + maxval=100, + shuffle=True, + random_state=None, +): + """Generate an array with block checkerboard structure for biclustering. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + shape : tuple of shape (n_rows, n_cols) + The shape of the result. + + n_clusters : int or array-like or shape (n_row_clusters, n_column_clusters) + The number of row and column clusters. + + noise : float, default=0.0 + The standard deviation of the gaussian noise. + + minval : float, default=10 + Minimum value of a bicluster. + + maxval : float, default=100 + Maximum value of a bicluster. + + shuffle : bool, default=True + Shuffle the samples. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for dataset creation. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : ndarray of shape `shape` + The generated array. + + rows : ndarray of shape (n_clusters, X.shape[0]) + The indicators for cluster membership of each row. + + cols : ndarray of shape (n_clusters, X.shape[1]) + The indicators for cluster membership of each column. + + See Also + -------- + make_biclusters : Generate an array with constant block diagonal structure + for biclustering. + + References + ---------- + .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003). 
+ Spectral biclustering of microarray data: coclustering genes + and conditions. Genome research, 13(4), 703-716. + """ + generator = check_random_state(random_state) + + if hasattr(n_clusters, "__len__"): + n_row_clusters, n_col_clusters = n_clusters + else: + n_row_clusters = n_col_clusters = n_clusters + + # row and column clusters of approximately equal sizes + n_rows, n_cols = shape + row_sizes = generator.multinomial( + n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters) + ) + col_sizes = generator.multinomial( + n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters) + ) + + row_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes)] + ) + col_labels = np.hstack( + [np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes)] + ) + + result = np.zeros(shape, dtype=np.float64) + for i in range(n_row_clusters): + for j in range(n_col_clusters): + selector = np.outer(row_labels == i, col_labels == j) + result[selector] += generator.uniform(minval, maxval) + + if noise > 0: + result += generator.normal(scale=noise, size=result.shape) + + if shuffle: + result, row_idx, col_idx = _shuffle(result, random_state) + row_labels = row_labels[row_idx] + col_labels = col_labels[col_idx] + + rows = np.vstack( + [ + row_labels == label + for label in range(n_row_clusters) + for _ in range(n_col_clusters) + ] + ) + cols = np.vstack( + [ + col_labels == label + for _ in range(n_row_clusters) + for label in range(n_col_clusters) + ] + ) + + return result, rows, cols diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py b/venv/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..7979604afab0e8aa8b64b8a65daa32a9ec2438b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/_species_distributions.py @@ -0,0 +1,273 @@ +""" +============================= +Species distribution dataset +============================= + +This dataset represents the geographic distribution of species. +The dataset is provided by Phillips et. al. (2006). + +The two species are: + + - `"Bradypus variegatus" + `_ , + the Brown-throated Sloth. + + - `"Microryzomys minutus" + `_ , + also known as the Forest Small Rice Rat, a rodent that lives in Peru, + Colombia, Ecuador, Peru, and Venezuela. + +References +---------- + +`"Maximum entropy modeling of species geographic distributions" +`_ S. J. Phillips, +R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. + +Notes +----- + +For an example of using this dataset, see +:ref:`examples/applications/plot_species_distribution_modeling.py +`. +""" + +# Authors: Peter Prettenhofer +# Jake Vanderplas +# +# License: BSD 3 clause + +import logging +from io import BytesIO +from os import PathLike, makedirs, remove +from os.path import exists + +import joblib +import numpy as np + +from ..utils import Bunch +from ..utils._param_validation import validate_params +from . 
import get_data_home +from ._base import RemoteFileMetadata, _fetch_remote, _pkl_filepath + +# The original data can be found at: +# https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip +SAMPLES = RemoteFileMetadata( + filename="samples.zip", + url="https://ndownloader.figshare.com/files/5976075", + checksum="abb07ad284ac50d9e6d20f1c4211e0fd3c098f7f85955e89d321ee8efe37ac28", +) + +# The original data can be found at: +# https://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip +COVERAGES = RemoteFileMetadata( + filename="coverages.zip", + url="https://ndownloader.figshare.com/files/5976078", + checksum="4d862674d72e79d6cee77e63b98651ec7926043ba7d39dcb31329cf3f6073807", +) + +DATA_ARCHIVE_NAME = "species_coverage.pkz" + + +logger = logging.getLogger(__name__) + + +def _load_coverage(F, header_length=6, dtype=np.int16): + """Load a coverage file from an open file object. + + This will return a numpy array of the given dtype + """ + header = [F.readline() for _ in range(header_length)] + make_tuple = lambda t: (t.split()[0], float(t.split()[1])) + header = dict([make_tuple(line) for line in header]) + + M = np.loadtxt(F, dtype=dtype) + nodata = int(header[b"NODATA_value"]) + if nodata != -9999: + M[nodata] = -9999 + return M + + +def _load_csv(F): + """Load csv file. + + Parameters + ---------- + F : file object + CSV file open in byte mode. + + Returns + ------- + rec : np.ndarray + record array representing the data + """ + names = F.readline().decode("ascii").strip().split(",") + + rec = np.loadtxt(F, skiprows=0, delimiter=",", dtype="S22,f4,f4") + rec.dtype.names = names + return rec + + +def construct_grids(batch): + """Construct the map grid from the batch object + + Parameters + ---------- + batch : Batch object + The object returned by :func:`fetch_species_distributions` + + Returns + ------- + (xgrid, ygrid) : 1-D arrays + The grid corresponding to the values in batch.coverages + """ + # x,y coordinates for corner cells + xmin = batch.x_left_lower_corner + batch.grid_size + xmax = xmin + (batch.Nx * batch.grid_size) + ymin = batch.y_left_lower_corner + batch.grid_size + ymax = ymin + (batch.Ny * batch.grid_size) + + # x coordinates of the grid cells + xgrid = np.arange(xmin, xmax, batch.grid_size) + # y coordinates of the grid cells + ygrid = np.arange(ymin, ymax, batch.grid_size) + + return (xgrid, ygrid) + + +@validate_params( + {"data_home": [str, PathLike, None], "download_if_missing": ["boolean"]}, + prefer_skip_nested_validation=True, +) +def fetch_species_distributions(*, data_home=None, download_if_missing=True): + """Loader for species distribution dataset from Phillips et. al. (2006). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + data_home : str or path-like, default=None + Specify another download and cache folder for the datasets. By default + all scikit-learn data is stored in '~/scikit_learn_data' subfolders. + + download_if_missing : bool, default=True + If False, raise an OSError if the data is not locally available + instead of trying to download the data from the source site. + + Returns + ------- + data : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + coverages : array, shape = [14, 1592, 1212] + These represent the 14 features measured + at each point of the map grid. + The latitude/longitude values for the grid are discussed below. + Missing data is represented by the value -9999. + train : record array, shape = (1624,) + The training points for the data. 
Each point has three fields: + + - train['species'] is the species name + - train['dd long'] is the longitude, in degrees + - train['dd lat'] is the latitude, in degrees + test : record array, shape = (620,) + The test points for the data. Same format as the training data. + Nx, Ny : integers + The number of longitudes (x) and latitudes (y) in the grid + x_left_lower_corner, y_left_lower_corner : floats + The (x,y) position of the lower-left corner, in degrees + grid_size : float + The spacing between points of the grid, in degrees + + Notes + ----- + + This dataset represents the geographic distribution of species. + The dataset is provided by Phillips et. al. (2006). + + The two species are: + + - `"Bradypus variegatus" + `_ , + the Brown-throated Sloth. + + - `"Microryzomys minutus" + `_ , + also known as the Forest Small Rice Rat, a rodent that lives in Peru, + Colombia, Ecuador, Peru, and Venezuela. + + - For an example of using this dataset with scikit-learn, see + :ref:`examples/applications/plot_species_distribution_modeling.py + `. + + References + ---------- + + * `"Maximum entropy modeling of species geographic distributions" + `_ + S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, + 190:231-259, 2006. + + Examples + -------- + >>> from sklearn.datasets import fetch_species_distributions + >>> species = fetch_species_distributions() + >>> species.train[:5] + array([(b'microryzomys_minutus', -64.7 , -17.85 ), + (b'microryzomys_minutus', -67.8333, -16.3333), + (b'microryzomys_minutus', -67.8833, -16.3 ), + (b'microryzomys_minutus', -67.8 , -16.2667), + (b'microryzomys_minutus', -67.9833, -15.9 )], + dtype=[('species', 'S22'), ('dd long', ' +# Lars Buitinck +# Olivier Grisel +# License: BSD 3 clause + +import os.path +from contextlib import closing +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from .. import __version__ +from ..utils import IS_PYPY, check_array +from ..utils._param_validation import HasMethods, Interval, StrOptions, validate_params + +if not IS_PYPY: + from ._svmlight_format_fast import ( + _dump_svmlight_file, + _load_svmlight_file, + ) +else: + + def _load_svmlight_file(*args, **kwargs): + raise NotImplementedError( + "load_svmlight_file is currently not " + "compatible with PyPy (see " + "https://github.com/scikit-learn/scikit-learn/issues/11543 " + "for the status updates)." + ) + + +@validate_params( + { + "f": [ + str, + Interval(Integral, 0, None, closed="left"), + os.PathLike, + HasMethods("read"), + ], + "n_features": [Interval(Integral, 1, None, closed="left"), None], + "dtype": "no_validation", # delegate validation to numpy + "multilabel": ["boolean"], + "zero_based": ["boolean", StrOptions({"auto"})], + "query_id": ["boolean"], + "offset": [Interval(Integral, 0, None, closed="left")], + "length": [Integral], + }, + prefer_skip_nested_validation=True, +) +def load_svmlight_file( + f, + *, + n_features=None, + dtype=np.float64, + multilabel=False, + zero_based="auto", + query_id=False, + offset=0, + length=-1, +): + """Load datasets in the svmlight / libsvm format into sparse CSR matrix. + + This format is a text-based format, with one sample per line. It does + not store zero valued features hence is suitable for sparse dataset. + + The first element of each line can be used to store a target variable + to predict. + + This format is used as the default format for both svmlight and the + libsvm command line programs. + + Parsing a text based source can be expensive. 
When repeatedly + working on the same dataset, it is recommended to wrap this + loader with joblib.Memory.cache to store a memmapped backup of the + CSR results of the first call and benefit from the near instantaneous + loading of memmapped structures for the subsequent calls. + + In case the file contains a pairwise preference constraint (known + as "qid" in the svmlight format) these are ignored unless the + query_id parameter is set to True. These pairwise preference + constraints can be used to constraint the combination of samples + when using pairwise loss functions (as is the case in some + learning to rank problems) so that only pairs with the same + query_id value are considered. + + This implementation is written in Cython and is reasonably fast. + However, a faster API-compatible loader is also available at: + + https://github.com/mblondel/svmlight-loader + + Parameters + ---------- + f : str, path-like, file-like or int + (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will + be uncompressed on the fly. If an integer is passed, it is assumed to + be a file descriptor. A file-like or file descriptor will not be closed + by this function. A file-like object must be opened in binary mode. + + .. versionchanged:: 1.2 + Path-like objects are now accepted. + + n_features : int, default=None + The number of features to use. If None, it will be inferred. This + argument is useful to load several files that are subsets of a + bigger sliced dataset: each subset might not have examples of + every feature, hence the inferred shape might vary from one + slice to another. + n_features is only required if ``offset`` or ``length`` are passed a + non-default value. + + dtype : numpy data type, default=np.float64 + Data type of dataset to be loaded. This will be the data type of the + output numpy arrays ``X`` and ``y``. + + multilabel : bool, default=False + Samples may have several labels each (see + https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). + + zero_based : bool or "auto", default="auto" + Whether column indices in f are zero-based (True) or one-based + (False). If column indices are one-based, they are transformed to + zero-based to match Python/NumPy conventions. + If set to "auto", a heuristic check is applied to determine this from + the file contents. Both kinds of files occur "in the wild", but they + are unfortunately not self-identifying. Using "auto" or True should + always be safe when no ``offset`` or ``length`` is passed. + If ``offset`` or ``length`` are passed, the "auto" mode falls back + to ``zero_based=True`` to avoid having the heuristic check yield + inconsistent results on different segments of the file. + + query_id : bool, default=False + If True, will return the query_id array for each file. + + offset : int, default=0 + Ignore the offset first bytes by seeking forward, then + discarding the following bytes up until the next new line + character. + + length : int, default=-1 + If strictly positive, stop reading any new line of data once the + position in the file has reached the (offset + length) bytes threshold. + + Returns + ------- + X : scipy.sparse matrix of shape (n_samples, n_features) + The data matrix. + + y : ndarray of shape (n_samples,), or a list of tuples of length n_samples + The target. It is a list of tuples when ``multilabel=True``, else a + ndarray. + + query_id : array of shape (n_samples,) + The query_id for each sample. Only returned when query_id is set to + True. 
+ + See Also + -------- + load_svmlight_files : Similar function for loading multiple files in this + format, enforcing the same number of features/columns on all of them. + + Examples + -------- + To use joblib.Memory to cache the svmlight file:: + + from joblib import Memory + from .datasets import load_svmlight_file + mem = Memory("./mycache") + + @mem.cache + def get_data(): + data = load_svmlight_file("mysvmlightfile") + return data[0], data[1] + + X, y = get_data() + """ + return tuple( + load_svmlight_files( + [f], + n_features=n_features, + dtype=dtype, + multilabel=multilabel, + zero_based=zero_based, + query_id=query_id, + offset=offset, + length=length, + ) + ) + + +def _gen_open(f): + if isinstance(f, int): # file descriptor + return open(f, "rb", closefd=False) + elif isinstance(f, os.PathLike): + f = os.fspath(f) + elif not isinstance(f, str): + raise TypeError("expected {str, int, path-like, file-like}, got %s" % type(f)) + + _, ext = os.path.splitext(f) + if ext == ".gz": + import gzip + + return gzip.open(f, "rb") + elif ext == ".bz2": + from bz2 import BZ2File + + return BZ2File(f, "rb") + else: + return open(f, "rb") + + +def _open_and_load(f, dtype, multilabel, zero_based, query_id, offset=0, length=-1): + if hasattr(f, "read"): + actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file( + f, dtype, multilabel, zero_based, query_id, offset, length + ) + else: + with closing(_gen_open(f)) as f: + actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file( + f, dtype, multilabel, zero_based, query_id, offset, length + ) + + # convert from array.array, give data the right dtype + if not multilabel: + labels = np.frombuffer(labels, np.float64) + data = np.frombuffer(data, actual_dtype) + indices = np.frombuffer(ind, np.longlong) + indptr = np.frombuffer(indptr, dtype=np.longlong) # never empty + query = np.frombuffer(query, np.int64) + + data = np.asarray(data, dtype=dtype) # no-op for float{32,64} + return data, indices, indptr, labels, query + + +@validate_params( + { + "files": [ + "array-like", + str, + os.PathLike, + HasMethods("read"), + Interval(Integral, 0, None, closed="left"), + ], + "n_features": [Interval(Integral, 1, None, closed="left"), None], + "dtype": "no_validation", # delegate validation to numpy + "multilabel": ["boolean"], + "zero_based": ["boolean", StrOptions({"auto"})], + "query_id": ["boolean"], + "offset": [Interval(Integral, 0, None, closed="left")], + "length": [Integral], + }, + prefer_skip_nested_validation=True, +) +def load_svmlight_files( + files, + *, + n_features=None, + dtype=np.float64, + multilabel=False, + zero_based="auto", + query_id=False, + offset=0, + length=-1, +): + """Load dataset from multiple files in SVMlight format. + + This function is equivalent to mapping load_svmlight_file over a list of + files, except that the results are concatenated into a single, flat list + and the samples vectors are constrained to all have the same number of + features. + + In case the file contains a pairwise preference constraint (known + as "qid" in the svmlight format) these are ignored unless the + query_id parameter is set to True. These pairwise preference + constraints can be used to constraint the combination of samples + when using pairwise loss functions (as is the case in some + learning to rank problems) so that only pairs with the same + query_id value are considered. + + Parameters + ---------- + files : array-like, dtype=str, path-like, file-like or int + (Paths of) files to load. 
If a path ends in ".gz" or ".bz2", it will + be uncompressed on the fly. If an integer is passed, it is assumed to + be a file descriptor. File-likes and file descriptors will not be + closed by this function. File-like objects must be opened in binary + mode. + + .. versionchanged:: 1.2 + Path-like objects are now accepted. + + n_features : int, default=None + The number of features to use. If None, it will be inferred from the + maximum column index occurring in any of the files. + + This can be set to a higher value than the actual number of features + in any of the input files, but setting it to a lower value will cause + an exception to be raised. + + dtype : numpy data type, default=np.float64 + Data type of dataset to be loaded. This will be the data type of the + output numpy arrays ``X`` and ``y``. + + multilabel : bool, default=False + Samples may have several labels each (see + https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). + + zero_based : bool or "auto", default="auto" + Whether column indices in f are zero-based (True) or one-based + (False). If column indices are one-based, they are transformed to + zero-based to match Python/NumPy conventions. + If set to "auto", a heuristic check is applied to determine this from + the file contents. Both kinds of files occur "in the wild", but they + are unfortunately not self-identifying. Using "auto" or True should + always be safe when no offset or length is passed. + If offset or length are passed, the "auto" mode falls back + to zero_based=True to avoid having the heuristic check yield + inconsistent results on different segments of the file. + + query_id : bool, default=False + If True, will return the query_id array for each file. + + offset : int, default=0 + Ignore the offset first bytes by seeking forward, then + discarding the following bytes up until the next new line + character. + + length : int, default=-1 + If strictly positive, stop reading any new line of data once the + position in the file has reached the (offset + length) bytes threshold. + + Returns + ------- + [X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays + Each (Xi, yi) pair is the result from load_svmlight_file(files[i]). + If query_id is set to True, this will return instead (Xi, yi, qi) + triplets. + + See Also + -------- + load_svmlight_file: Similar function for loading a single file in this + format. + + Notes + ----- + When fitting a model to a matrix X_train and evaluating it against a + matrix X_test, it is essential that X_train and X_test have the same + number of features (X_train.shape[1] == X_test.shape[1]). This may not + be the case if you load the files individually with load_svmlight_file. 
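The Notes here on keeping a shared feature count across files suggest a usage sketch; the following is illustrative only (the file names and the n_features value are placeholders, not files from this repository):

    from sklearn.datasets import load_svmlight_files

    # Loading related splits together forces a shared column space; an explicit
    # n_features guards against a split that happens to lack the highest-index
    # feature ("train.svm"/"test.svm" and 300 are hypothetical values).
    X_train, y_train, X_test, y_test = load_svmlight_files(
        ["train.svm", "test.svm"], n_features=300
    )
    assert X_train.shape[1] == X_test.shape[1] == 300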
+ """ + if (offset != 0 or length > 0) and zero_based == "auto": + # disable heuristic search to avoid getting inconsistent results on + # different segments of the file + zero_based = True + + if (offset != 0 or length > 0) and n_features is None: + raise ValueError("n_features is required when offset or length is specified.") + + r = [ + _open_and_load( + f, + dtype, + multilabel, + bool(zero_based), + bool(query_id), + offset=offset, + length=length, + ) + for f in files + ] + + if ( + zero_based is False + or zero_based == "auto" + and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r) + ): + for _, indices, _, _, _ in r: + indices -= 1 + + n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1 + + if n_features is None: + n_features = n_f + elif n_features < n_f: + raise ValueError( + "n_features was set to {}, but input file contains {} features".format( + n_features, n_f + ) + ) + + result = [] + for data, indices, indptr, y, query_values in r: + shape = (indptr.shape[0] - 1, n_features) + X = sp.csr_matrix((data, indices, indptr), shape) + X.sort_indices() + result += X, y + if query_id: + result.append(query_values) + + return result + + +def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id): + if comment: + f.write( + ( + "# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__ + ).encode() + ) + f.write( + ("# Column indices are %s-based\n" % ["zero", "one"][one_based]).encode() + ) + + f.write(b"#\n") + f.writelines(b"# %s\n" % line for line in comment.splitlines()) + X_is_sp = sp.issparse(X) + y_is_sp = sp.issparse(y) + if not multilabel and not y_is_sp: + y = y[:, np.newaxis] + _dump_svmlight_file( + X, + y, + f, + multilabel, + one_based, + query_id, + X_is_sp, + y_is_sp, + ) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "y": ["array-like", "sparse matrix"], + "f": [str, HasMethods(["write"])], + "zero_based": ["boolean"], + "comment": [str, bytes, None], + "query_id": ["array-like", None], + "multilabel": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def dump_svmlight_file( + X, + y, + f, + *, + zero_based=True, + comment=None, + query_id=None, + multilabel=False, +): + """Dump the dataset in svmlight / libsvm file format. + + This format is a text-based format, with one sample per line. It does + not store zero valued features hence is suitable for sparse dataset. + + The first element of each line can be used to store a target variable + to predict. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels) + Target values. Class labels must be an + integer or float, or array-like objects of integer or float for + multilabel classifications. + + f : str or file-like in binary mode + If string, specifies the path that will contain the data. + If file-like, data will be written to f. f should be opened in binary + mode. + + zero_based : bool, default=True + Whether column indices should be written zero-based (True) or one-based + (False). + + comment : str or bytes, default=None + Comment to insert at the top of the file. This should be either a + Unicode string, which will be encoded as UTF-8, or an ASCII byte + string. + If a comment is given, then it will be preceded by one that identifies + the file as having been dumped by scikit-learn. 
Note that not all + tools grok comments in SVMlight files. + + query_id : array-like of shape (n_samples,), default=None + Array containing pairwise preference constraints (qid in svmlight + format). + + multilabel : bool, default=False + Samples may have several labels each (see + https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html). + + .. versionadded:: 0.17 + parameter `multilabel` to support multilabel datasets. + + Examples + -------- + >>> from sklearn.datasets import dump_svmlight_file, make_classification + >>> X, y = make_classification(random_state=0) + >>> output_file = "my_dataset.svmlight" + >>> dump_svmlight_file(X, y, output_file) # doctest: +SKIP + """ + if comment is not None: + # Convert comment string to list of lines in UTF-8. + # If a byte string is passed, then check whether it's ASCII; + # if a user wants to get fancy, they'll have to decode themselves. + if isinstance(comment, bytes): + comment.decode("ascii") # just for the exception + else: + comment = comment.encode("utf-8") + if b"\0" in comment: + raise ValueError("comment string contains NUL byte") + + yval = check_array(y, accept_sparse="csr", ensure_2d=False) + if sp.issparse(yval): + if yval.shape[1] != 1 and not multilabel: + raise ValueError( + "expected y of shape (n_samples, 1), got %r" % (yval.shape,) + ) + else: + if yval.ndim != 1 and not multilabel: + raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,)) + + Xval = check_array(X, accept_sparse="csr") + if Xval.shape[0] != yval.shape[0]: + raise ValueError( + "X.shape[0] and y.shape[0] should be the same, got %r and %r instead." + % (Xval.shape[0], yval.shape[0]) + ) + + # We had some issues with CSR matrices with unsorted indices (e.g. #1501), + # so sort them here, but first make sure we don't modify the user's X. + # TODO We can do this cheaper; sorted_indices copies the whole matrix. + if yval is y and hasattr(yval, "sorted_indices"): + y = yval.sorted_indices() + else: + y = yval + if hasattr(y, "sort_indices"): + y.sort_indices() + + if Xval is X and hasattr(Xval, "sorted_indices"): + X = Xval.sorted_indices() + else: + X = Xval + if hasattr(X, "sort_indices"): + X.sort_indices() + + if query_id is None: + # NOTE: query_id is passed to Cython functions using a fused type on query_id. + # Yet as of Cython>=3.0, memory views can't be None otherwise the runtime + # would not known which concrete implementation to dispatch the Python call to. + # TODO: simplify interfaces and implementations in _svmlight_format_fast.pyx. 
+ query_id = np.array([], dtype=np.int32) + else: + query_id = np.asarray(query_id) + if query_id.shape[0] != y.shape[0]: + raise ValueError( + "expected query_id of shape (n_samples,), got %r" % (query_id.shape,) + ) + + one_based = not zero_based + + if hasattr(f, "write"): + _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id) + else: + with open(f, "wb") as f: + _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id) diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bded3324785404e543078667da6a8e857d14c44d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fb2c9356d8955ada5ced0eecf395173a0ee5285 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a194904cd486a250ceb2dad9fa75194e35cc8982 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03c3dcc92ddac110637e33acd3fdd2423bef592e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..378133d8b1195ad2adfcb9497ae7b59664d22959 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dcf0ca12482855f7a73fd47f76d51d422c25c5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf814d8d8f3d602166e06428d81cb51520c12fe5 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd53cd036cb52213b175d31f0eff43357e106c30 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py new file mode 100644 index 0000000000000000000000000000000000000000..b79df4db8cd74a70452ca7212b36b7ddc305caa3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py @@ -0,0 +1,983 @@ +import itertools +import warnings +from functools import partial + +import numpy as np +import pytest + +import sklearn +from sklearn.base import clone +from sklearn.decomposition import ( + DictionaryLearning, + MiniBatchDictionaryLearning, + SparseCoder, + dict_learning, + dict_learning_online, + sparse_encode, +) +from sklearn.decomposition._dict_learning import _update_dict +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils import check_array +from sklearn.utils._testing import ( + TempMemmap, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.estimator_checks import ( + check_transformer_data_not_an_array, + check_transformer_general, + check_transformers_unfitted, +) +from sklearn.utils.parallel import Parallel + +rng_global = np.random.RandomState(0) +n_samples, n_features = 10, 8 +X = rng_global.randn(n_samples, n_features) + + +def test_sparse_encode_shapes_omp(): + rng = np.random.RandomState(0) + algorithms = ["omp", "lasso_lars", "lasso_cd", "lars", "threshold"] + for n_components, n_samples in itertools.product([1, 5], [1, 9]): + X_ = rng.randn(n_samples, n_features) + dictionary = rng.randn(n_components, n_features) + for algorithm, n_jobs in itertools.product(algorithms, [1, 2]): + code = sparse_encode(X_, dictionary, algorithm=algorithm, n_jobs=n_jobs) + assert code.shape == (n_samples, n_components) + + +def test_dict_learning_shapes(): + n_components = 5 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + + n_components = 1 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + assert dico.transform(X).shape == (X.shape[0], n_components) + + +def test_dict_learning_overcomplete(): + n_components = 12 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + + +def test_max_iter(): + def ricker_function(resolution, center, width): + """Discrete sub-sampled Ricker (Mexican hat) wavelet""" + x = np.linspace(0, resolution - 1, resolution) + x = ( + (2 / (np.sqrt(3 * width) * np.pi**0.25)) + * (1 - (x - center) ** 2 / width**2) + * np.exp(-((x - center) ** 2) / (2 * width**2)) + ) + return x + + def ricker_matrix(width, resolution, n_components): + """Dictionary of Ricker (Mexican hat) wavelets""" + centers = np.linspace(0, resolution - 1, n_components) + D = np.empty((n_components, resolution)) + for i, center in enumerate(centers): + 
D[i] = ricker_function(resolution, center, width) + D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis] + return D + + transform_algorithm = "lasso_cd" + resolution = 1024 + subsampling = 3 # subsampling factor + n_components = resolution // subsampling + + # Compute a wavelet dictionary + D_multi = np.r_[ + tuple( + ricker_matrix( + width=w, resolution=resolution, n_components=n_components // 5 + ) + for w in (10, 50, 100, 500, 1000) + ) + ] + + X = np.linspace(0, resolution - 1, resolution) + first_quarter = X < resolution / 4 + X[first_quarter] = 3.0 + X[np.logical_not(first_quarter)] = -1.0 + X = X.reshape(1, -1) + + # check that the underlying model fails to converge + with pytest.warns(ConvergenceWarning): + model = SparseCoder( + D_multi, transform_algorithm=transform_algorithm, transform_max_iter=1 + ) + model.fit_transform(X) + + # check that the underlying model converges w/o warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + model = SparseCoder( + D_multi, transform_algorithm=transform_algorithm, transform_max_iter=2000 + ) + model.fit_transform(X) + + +def test_dict_learning_lars_positive_parameter(): + n_components = 5 + alpha = 1 + err_msg = "Positive constraint not supported for 'lars' coding method." + with pytest.raises(ValueError, match=err_msg): + dict_learning(X, n_components, alpha=alpha, positive_code=True) + + +@pytest.mark.parametrize( + "transform_algorithm", + [ + "lasso_lars", + "lasso_cd", + "threshold", + ], +) +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_positivity(transform_algorithm, positive_code, positive_dict): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm=transform_algorithm, + random_state=0, + positive_code=positive_code, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + code = dico.transform(X) + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_lars_dict_positivity(positive_dict): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + random_state=0, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + + +def test_dict_learning_lars_code_positivity(): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + random_state=0, + positive_code=True, + fit_algorithm="cd", + ).fit(X) + + err_msg = "Positive constraint not supported for '{}' coding method." + err_msg = err_msg.format("lars") + with pytest.raises(ValueError, match=err_msg): + dico.transform(X) + + +def test_dict_learning_reconstruction(): + n_components = 12 + dico = DictionaryLearning( + n_components, transform_algorithm="omp", transform_alpha=0.001, random_state=0 + ) + code = dico.fit(X).transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X) + + dico.set_params(transform_algorithm="lasso_lars") + code = dico.transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) + + # used to test lars here too, but there's no guarantee the number of + # nonzero atoms is right. 
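The reconstruction check above also holds for the function-level API; a minimal sketch, with array sizes and alpha chosen purely for illustration rather than taken from this suite's fixtures:

    import numpy as np
    from sklearn.decomposition import dict_learning

    rng_demo = np.random.RandomState(0)
    X_demo = rng_demo.randn(10, 8)
    # dict_learning returns the sparse codes, the dictionary atoms and the
    # error at each iteration; codes @ atoms approximates X_demo.
    code_demo, dict_demo, errors_demo = dict_learning(
        X_demo, n_components=12, alpha=0.001, random_state=0
    )
    assert code_demo.shape == (10, 12)
    assert dict_demo.shape == (12, 8)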
+ + +def test_dict_learning_reconstruction_parallel(): + # regression test that parallel reconstruction works with n_jobs>1 + n_components = 12 + dico = DictionaryLearning( + n_components, + transform_algorithm="omp", + transform_alpha=0.001, + random_state=0, + n_jobs=4, + ) + code = dico.fit(X).transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X) + + dico.set_params(transform_algorithm="lasso_lars") + code = dico.transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) + + +def test_dict_learning_lassocd_readonly_data(): + n_components = 12 + with TempMemmap(X) as X_read_only: + dico = DictionaryLearning( + n_components, + transform_algorithm="lasso_cd", + transform_alpha=0.001, + random_state=0, + n_jobs=4, + ) + with ignore_warnings(category=ConvergenceWarning): + code = dico.fit(X_read_only).transform(X_read_only) + assert_array_almost_equal( + np.dot(code, dico.components_), X_read_only, decimal=2 + ) + + +def test_dict_learning_nonzero_coefs(): + n_components = 4 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + transform_n_nonzero_coefs=3, + random_state=0, + ) + code = dico.fit(X).transform(X[np.newaxis, 1]) + assert len(np.flatnonzero(code)) == 3 + + dico.set_params(transform_algorithm="omp") + code = dico.transform(X[np.newaxis, 1]) + assert len(np.flatnonzero(code)) == 3 + + +def test_dict_learning_split(): + n_components = 5 + dico = DictionaryLearning( + n_components, transform_algorithm="threshold", random_state=0 + ) + code = dico.fit(X).transform(X) + dico.split_sign = True + split_code = dico.transform(X) + + assert_array_almost_equal( + split_code[:, :n_components] - split_code[:, n_components:], code + ) + + +def test_dict_learning_online_shapes(): + rng = np.random.RandomState(0) + n_components = 8 + + code, dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + max_iter=10, + method="cd", + random_state=rng, + return_code=True, + ) + assert code.shape == (n_samples, n_components) + assert dictionary.shape == (n_components, n_features) + assert np.dot(code, dictionary).shape == X.shape + + dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + max_iter=10, + method="cd", + random_state=rng, + return_code=False, + ) + assert dictionary.shape == (n_components, n_features) + + +def test_dict_learning_online_lars_positive_parameter(): + err_msg = "Positive constraint not supported for 'lars' coding method." 
+ with pytest.raises(ValueError, match=err_msg): + dict_learning_online(X, batch_size=4, max_iter=10, positive_code=True) + + +@pytest.mark.parametrize( + "transform_algorithm", + [ + "lasso_lars", + "lasso_cd", + "threshold", + ], +) +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_minibatch_dictionary_learning_positivity( + transform_algorithm, positive_code, positive_dict +): + n_components = 8 + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=10, + transform_algorithm=transform_algorithm, + random_state=0, + positive_code=positive_code, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + code = dico.transform(X) + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_minibatch_dictionary_learning_lars(positive_dict): + n_components = 8 + + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=10, + transform_algorithm="lars", + random_state=0, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + + +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_online_positivity(positive_code, positive_dict): + rng = np.random.RandomState(0) + n_components = 8 + + code, dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + method="cd", + alpha=1, + random_state=rng, + positive_dict=positive_dict, + positive_code=positive_code, + ) + if positive_dict: + assert (dictionary >= 0).all() + else: + assert (dictionary < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +def test_dict_learning_online_verbosity(): + # test verbosity for better coverage + n_components = 5 + import sys + from io import StringIO + + old_stdout = sys.stdout + try: + sys.stdout = StringIO() + + # convergence monitoring verbosity + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, verbose=1, tol=0.1, random_state=0 + ) + dico.fit(X) + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=5, + verbose=1, + max_no_improvement=2, + random_state=0, + ) + dico.fit(X) + # higher verbosity level + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, verbose=2, random_state=0 + ) + dico.fit(X) + + # function API verbosity + dict_learning_online( + X, + n_components=n_components, + batch_size=4, + alpha=1, + verbose=1, + random_state=0, + ) + dict_learning_online( + X, + n_components=n_components, + batch_size=4, + alpha=1, + verbose=2, + random_state=0, + ) + finally: + sys.stdout = old_stdout + + assert dico.components_.shape == (n_components, n_features) + + +def test_dict_learning_online_estimator_shapes(): + n_components = 5 + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, random_state=0 + ) + dico.fit(X) + assert dico.components_.shape == (n_components, n_features) + + +def test_dict_learning_online_overcomplete(): + n_components = 12 + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, random_state=0 + ).fit(X) + assert dico.components_.shape == (n_components, n_features) + 
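As with the batch estimator earlier in this file, the minibatch variant supports the same fit_transform/components_ round trip; a brief sketch under illustrative sizes (not one of the suite's fixtures):

    import numpy as np
    from sklearn.decomposition import MiniBatchDictionaryLearning

    rng_demo = np.random.RandomState(0)
    X_demo = rng_demo.randn(10, 8)
    mbdl = MiniBatchDictionaryLearning(
        n_components=12, batch_size=4, max_iter=20, random_state=0
    )
    codes = mbdl.fit_transform(X_demo)
    # The sparse codes times the learned atoms give an approximation of X_demo.
    assert np.dot(codes, mbdl.components_).shape == X_demo.shape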
+ +def test_dict_learning_online_initialization(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=0, dict_init=V, random_state=0 + ).fit(X) + assert_array_equal(dico.components_, V) + + +def test_dict_learning_online_readonly_initialization(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) + V.setflags(write=False) + MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=1, + dict_init=V, + random_state=0, + shuffle=False, + ).fit(X) + + +def test_dict_learning_online_partial_fit(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + dict1 = MiniBatchDictionaryLearning( + n_components, + max_iter=10, + batch_size=1, + alpha=1, + shuffle=False, + dict_init=V, + max_no_improvement=None, + tol=0.0, + random_state=0, + ).fit(X) + dict2 = MiniBatchDictionaryLearning( + n_components, alpha=1, dict_init=V, random_state=0 + ) + for i in range(10): + for sample in X: + dict2.partial_fit(sample[np.newaxis, :]) + + assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0) + assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) + + # partial_fit should ignore max_iter (#17433) + assert dict1.n_steps_ == dict2.n_steps_ == 100 + + +def test_sparse_encode_shapes(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"): + code = sparse_encode(X, V, algorithm=algo) + assert code.shape == (n_samples, n_components) + + +@pytest.mark.parametrize("algo", ["lasso_lars", "lasso_cd", "threshold"]) +@pytest.mark.parametrize("positive", [False, True]) +def test_sparse_encode_positivity(algo, positive): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + code = sparse_encode(X, V, algorithm=algo, positive=positive) + if positive: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("algo", ["lars", "omp"]) +def test_sparse_encode_unavailable_positivity(algo): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + err_msg = "Positive constraint not supported for '{}' coding method." 
+ err_msg = err_msg.format(algo) + with pytest.raises(ValueError, match=err_msg): + sparse_encode(X, V, algorithm=algo, positive=True) + + +def test_sparse_encode_input(): + n_components = 100 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + Xf = check_array(X, order="F") + for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"): + a = sparse_encode(X, V, algorithm=algo) + b = sparse_encode(Xf, V, algorithm=algo) + assert_array_almost_equal(a, b) + + +def test_sparse_encode_error(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + code = sparse_encode(X, V, alpha=0.001) + assert not np.all(code == 0) + assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1 + + +def test_sparse_encode_error_default_sparsity(): + rng = np.random.RandomState(0) + X = rng.randn(100, 64) + D = rng.randn(2, 64) + code = ignore_warnings(sparse_encode)(X, D, algorithm="omp", n_nonzero_coefs=None) + assert code.shape == (100, 2) + + +def test_sparse_coder_estimator(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + coder = SparseCoder( + dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001 + ).transform(X) + assert not np.all(coder == 0) + assert np.sqrt(np.sum((np.dot(coder, V) - X) ** 2)) < 0.1 + + +def test_sparse_coder_estimator_clone(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + coder = SparseCoder( + dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001 + ) + cloned = clone(coder) + assert id(cloned) != id(coder) + np.testing.assert_allclose(cloned.dictionary, coder.dictionary) + assert id(cloned.dictionary) != id(coder.dictionary) + assert cloned.n_components_ == coder.n_components_ + assert cloned.n_features_in_ == coder.n_features_in_ + data = np.random.rand(n_samples, n_features).astype(np.float32) + np.testing.assert_allclose(cloned.transform(data), coder.transform(data)) + + +def test_sparse_coder_parallel_mmap(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/5956 + # Test that SparseCoder does not error by passing reading only + # arrays to child processes + + rng = np.random.RandomState(777) + n_components, n_features = 40, 64 + init_dict = rng.rand(n_components, n_features) + # Ensure that `data` is >2M. Joblib memory maps arrays + # if they are larger than 1MB. 
The 4 accounts for float32 + # data type + n_samples = int(2e6) // (4 * n_features) + data = np.random.rand(n_samples, n_features).astype(np.float32) + + sc = SparseCoder(init_dict, transform_algorithm="omp", n_jobs=2) + sc.fit_transform(data) + + +def test_sparse_coder_common_transformer(): + rng = np.random.RandomState(777) + n_components, n_features = 40, 3 + init_dict = rng.rand(n_components, n_features) + + sc = SparseCoder(init_dict) + + check_transformer_data_not_an_array(sc.__class__.__name__, sc) + check_transformer_general(sc.__class__.__name__, sc) + check_transformer_general_memmap = partial( + check_transformer_general, readonly_memmap=True + ) + check_transformer_general_memmap(sc.__class__.__name__, sc) + check_transformers_unfitted(sc.__class__.__name__, sc) + + +def test_sparse_coder_n_features_in(): + d = np.array([[1, 2, 3], [1, 2, 3]]) + sc = SparseCoder(d) + assert sc.n_features_in_ == d.shape[1] + + +def test_update_dict(): + # Check the dict update in batch mode vs online mode + # Non-regression test for #4866 + rng = np.random.RandomState(0) + + code = np.array([[0.5, -0.5], [0.1, 0.9]]) + dictionary = np.array([[1.0, 0.0], [0.6, 0.8]]) + + X = np.dot(code, dictionary) + rng.randn(2, 2) + + # full batch update + newd_batch = dictionary.copy() + _update_dict(newd_batch, X, code) + + # online update + A = np.dot(code.T, code) + B = np.dot(X.T, code) + newd_online = dictionary.copy() + _update_dict(newd_online, X, code, A, B) + + assert_allclose(newd_batch, newd_online) + + +@pytest.mark.parametrize( + "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +# Note: do not check integer input because `lasso_lars` and `lars` fail with +# `ValueError` in `_lars_path_solver` +def test_sparse_encode_dtype_match(data_type, algorithm): + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + code = sparse_encode( + X.astype(data_type), dictionary.astype(data_type), algorithm=algorithm + ) + assert code.dtype == data_type + + +@pytest.mark.parametrize( + "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +def test_sparse_encode_numerical_consistency(algorithm): + # verify numerical consistency among np.float32 and np.float64 + rtol = 1e-4 + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + code_32 = sparse_encode( + X.astype(np.float32), dictionary.astype(np.float32), algorithm=algorithm + ) + code_64 = sparse_encode( + X.astype(np.float64), dictionary.astype(np.float64), algorithm=algorithm + ) + assert_allclose(code_32, code_64, rtol=rtol) + + +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +# Note: do not check integer input because `lasso_lars` and `lars` fail with +# `ValueError` in `_lars_path_solver` +def test_sparse_coder_dtype_match(data_type, transform_algorithm): + # Verify preserving dtype for transform in sparse coder + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + coder = SparseCoder( + dictionary.astype(data_type), transform_algorithm=transform_algorithm + ) + code = coder.transform(X.astype(data_type)) + assert code.dtype == data_type + + +@pytest.mark.parametrize("fit_algorithm", ("lars", "cd")) +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", 
"threshold", "omp") +) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dictionary_learning_dtype_match( + data_type, + expected_type, + fit_algorithm, + transform_algorithm, +): + # Verify preserving dtype for fit and transform in dictionary learning class + dict_learner = DictionaryLearning( + n_components=8, + fit_algorithm=fit_algorithm, + transform_algorithm=transform_algorithm, + random_state=0, + ) + dict_learner.fit(X.astype(data_type)) + assert dict_learner.components_.dtype == expected_type + assert dict_learner.transform(X.astype(data_type)).dtype == expected_type + + +@pytest.mark.parametrize("fit_algorithm", ("lars", "cd")) +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_minibatch_dictionary_learning_dtype_match( + data_type, + expected_type, + fit_algorithm, + transform_algorithm, +): + # Verify preserving dtype for fit and transform in minibatch dictionary learning + dict_learner = MiniBatchDictionaryLearning( + n_components=8, + batch_size=10, + fit_algorithm=fit_algorithm, + transform_algorithm=transform_algorithm, + max_iter=100, + tol=1e-1, + random_state=0, + ) + dict_learner.fit(X.astype(data_type)) + + assert dict_learner.components_.dtype == expected_type + assert dict_learner.transform(X.astype(data_type)).dtype == expected_type + assert dict_learner._A.dtype == expected_type + assert dict_learner._B.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dict_learning_dtype_match(data_type, expected_type, method): + # Verify output matrix dtype + rng = np.random.RandomState(0) + n_components = 8 + code, dictionary, _ = dict_learning( + X.astype(data_type), + n_components=n_components, + alpha=1, + random_state=rng, + method=method, + ) + assert code.dtype == expected_type + assert dictionary.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_dict_learning_numerical_consistency(method): + # verify numerically consistent among np.float32 and np.float64 + rtol = 1e-6 + n_components = 4 + alpha = 2 + + U_64, V_64, _ = dict_learning( + X.astype(np.float64), + n_components=n_components, + alpha=alpha, + random_state=0, + method=method, + ) + U_32, V_32, _ = dict_learning( + X.astype(np.float32), + n_components=n_components, + alpha=alpha, + random_state=0, + method=method, + ) + + # Optimal solution (U*, V*) is not unique. + # If (U*, V*) is optimal solution, (-U*,-V*) is also optimal, + # and (column permutated U*, row permutated V*) are also optional + # as long as holding UV. + # So here UV, ||U||_1,1 and sum(||V_k||_2^2) are verified + # instead of comparing directly U and V. 
+ assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol) + assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol) + assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol) + # verify an obtained solution is not degenerate + assert np.mean(U_64 != 0.0) > 0.05 + assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0) + + +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dict_learning_online_dtype_match(data_type, expected_type, method): + # Verify output matrix dtype + rng = np.random.RandomState(0) + n_components = 8 + code, dictionary = dict_learning_online( + X.astype(data_type), + n_components=n_components, + alpha=1, + batch_size=10, + random_state=rng, + method=method, + ) + assert code.dtype == expected_type + assert dictionary.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_dict_learning_online_numerical_consistency(method): + # verify numerically consistent among np.float32 and np.float64 + rtol = 1e-4 + n_components = 4 + alpha = 1 + + U_64, V_64 = dict_learning_online( + X.astype(np.float64), + n_components=n_components, + max_iter=1_000, + alpha=alpha, + batch_size=10, + random_state=0, + method=method, + tol=0.0, + max_no_improvement=None, + ) + U_32, V_32 = dict_learning_online( + X.astype(np.float32), + n_components=n_components, + max_iter=1_000, + alpha=alpha, + batch_size=10, + random_state=0, + method=method, + tol=0.0, + max_no_improvement=None, + ) + + # Optimal solution (U*, V*) is not unique. + # If (U*, V*) is optimal solution, (-U*,-V*) is also optimal, + # and (column permutated U*, row permutated V*) are also optional + # as long as holding UV. + # So here UV, ||U||_1,1 and sum(||V_k||_2) are verified + # instead of comparing directly U and V. + assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol) + assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol) + assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol) + # verify an obtained solution is not degenerate + assert np.mean(U_64 != 0.0) > 0.05 + assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0) + + +@pytest.mark.parametrize( + "estimator", + [ + SparseCoder(X.T), + DictionaryLearning(), + MiniBatchDictionaryLearning(batch_size=4, max_iter=10), + ], + ids=lambda x: x.__class__.__name__, +) +def test_get_feature_names_out(estimator): + """Check feature names for dict learning estimators.""" + estimator.fit(X) + n_components = X.shape[1] + + feature_names_out = estimator.get_feature_names_out() + estimator_name = estimator.__class__.__name__.lower() + assert_array_equal( + feature_names_out, + [f"{estimator_name}{i}" for i in range(n_components)], + ) + + +def test_cd_work_on_joblib_memmapped_data(monkeypatch): + monkeypatch.setattr( + sklearn.decomposition._dict_learning, + "Parallel", + partial(Parallel, max_nbytes=100), + ) + + rng = np.random.RandomState(0) + X_train = rng.randn(10, 10) + + dict_learner = DictionaryLearning( + n_components=5, + random_state=0, + n_jobs=2, + fit_algorithm="cd", + max_iter=50, + verbose=True, + ) + + # This must run and complete without error. 
+    dict_learner.fit(X_train)
+
+
+# TODO(1.6): remove in 1.6
+def test_dict_learning_max_iter_None_deprecation():
+    """Check the FutureWarning for the deprecation of `max_iter=None`."""
+    warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed"
+    with pytest.warns(FutureWarning, match=warn_msg):
+        MiniBatchDictionaryLearning(max_iter=None, random_state=0).fit(X)
+    with pytest.warns(FutureWarning, match=warn_msg):
+        dict_learning_online(X, max_iter=None, random_state=0)
diff --git a/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py
new file mode 100644
index 0000000000000000000000000000000000000000..3797970e3d6badc0a9537f410ae04cb24958bcf7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py
@@ -0,0 +1,367 @@
+# Author: Vlad Niculae
+# License: BSD 3 clause
+
+import sys
+
+import numpy as np
+import pytest
+from numpy.testing import assert_array_equal
+
+from sklearn.decomposition import PCA, MiniBatchSparsePCA, SparsePCA
+from sklearn.utils import check_random_state
+from sklearn.utils._testing import (
+    assert_allclose,
+    assert_array_almost_equal,
+    if_safe_multiprocessing_with_blas,
+)
+
+
+def generate_toy_data(n_components, n_samples, image_size, random_state=None):
+    n_features = image_size[0] * image_size[1]
+
+    rng = check_random_state(random_state)
+    U = rng.randn(n_samples, n_components)
+    V = rng.randn(n_components, n_features)
+
+    centers = [(3, 3), (6, 7), (8, 1)]
+    sz = [1, 2, 1]
+    for k in range(n_components):
+        img = np.zeros(image_size)
+        xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
+        ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
+        img[xmin:xmax][:, ymin:ymax] = 1.0
+        V[k, :] = img.ravel()
+
+    # Y is defined by : Y = UV + noise
+    Y = np.dot(U, V)
+    Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])  # Add noise
+    return Y, U, V
+
+
+# SparsePCA can be a bit slow.
To avoid having test times go up, we +# test different aspects of the code in the same test + + +def test_correct_shapes(): + rng = np.random.RandomState(0) + X = rng.randn(12, 10) + spca = SparsePCA(n_components=8, random_state=rng) + U = spca.fit_transform(X) + assert spca.components_.shape == (8, 10) + assert U.shape == (12, 8) + # test overcomplete decomposition + spca = SparsePCA(n_components=13, random_state=rng) + U = spca.fit_transform(X) + assert spca.components_.shape == (13, 10) + assert U.shape == (12, 13) + + +def test_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0) + spca_lars.fit(Y) + + # Test that CD gives similar results + spca_lasso = SparsePCA(n_components=3, method="cd", random_state=0, alpha=alpha) + spca_lasso.fit(Y) + assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) + + +@if_safe_multiprocessing_with_blas +def test_fit_transform_parallel(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0) + spca_lars.fit(Y) + U1 = spca_lars.transform(Y) + # Test multiple CPUs + spca = SparsePCA( + n_components=3, n_jobs=2, method="lars", alpha=alpha, random_state=0 + ).fit(Y) + U2 = spca.transform(Y) + assert not np.all(spca_lars.components_ == 0) + assert_array_almost_equal(U1, U2) + + +def test_transform_nan(): + # Test that SparsePCA won't return NaN when there is 0 feature in all + # samples. + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + Y[:, 0] = 0 + estimator = SparsePCA(n_components=8) + assert not np.any(np.isnan(estimator.fit_transform(Y))) + + +def test_fit_transform_tall(): + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array + spca_lars = SparsePCA(n_components=3, method="lars", random_state=rng) + U1 = spca_lars.fit_transform(Y) + spca_lasso = SparsePCA(n_components=3, method="cd", random_state=rng) + U2 = spca_lasso.fit(Y).transform(Y) + assert_array_almost_equal(U1, U2) + + +def test_initialization(): + rng = np.random.RandomState(0) + U_init = rng.randn(5, 3) + V_init = rng.randn(3, 4) + model = SparsePCA( + n_components=3, U_init=U_init, V_init=V_init, max_iter=0, random_state=rng + ) + model.fit(rng.randn(5, 4)) + assert_allclose(model.components_, V_init / np.linalg.norm(V_init, axis=1)[:, None]) + + +def test_mini_batch_correct_shapes(): + rng = np.random.RandomState(0) + X = rng.randn(12, 10) + pca = MiniBatchSparsePCA(n_components=8, max_iter=1, random_state=rng) + U = pca.fit_transform(X) + assert pca.components_.shape == (8, 10) + assert U.shape == (12, 8) + # test overcomplete decomposition + pca = MiniBatchSparsePCA(n_components=13, max_iter=1, random_state=rng) + U = pca.fit_transform(X) + assert pca.components_.shape == (13, 10) + assert U.shape == (12, 13) + + +# XXX: test always skipped +@pytest.mark.skipif(True, reason="skipping mini_batch_fit_transform.") +def test_mini_batch_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y) + U1 = spca_lars.transform(Y) + # Test multiple CPUs + if sys.platform == "win32": # fake 
parallelism for win32 + import joblib + + _mp = joblib.parallel.multiprocessing + joblib.parallel.multiprocessing = None + try: + spca = MiniBatchSparsePCA( + n_components=3, n_jobs=2, alpha=alpha, random_state=0 + ) + U2 = spca.fit(Y).transform(Y) + finally: + joblib.parallel.multiprocessing = _mp + else: # we can efficiently use parallelism + spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0) + U2 = spca.fit(Y).transform(Y) + assert not np.all(spca_lars.components_ == 0) + assert_array_almost_equal(U1, U2) + # Test that CD gives similar results + spca_lasso = MiniBatchSparsePCA( + n_components=3, method="cd", alpha=alpha, random_state=0 + ).fit(Y) + assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) + + +def test_scaling_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng) + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=rng) + results_train = spca_lars.fit_transform(Y) + results_test = spca_lars.transform(Y[:10]) + assert_allclose(results_train[0], results_test[0]) + + +def test_pca_vs_spca(): + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng) + Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) + spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2) + pca = PCA(n_components=2) + pca.fit(Y) + spca.fit(Y) + results_test_pca = pca.transform(Z) + results_test_spca = spca.transform(Z) + assert_allclose( + np.abs(spca.components_.dot(pca.components_.T)), np.eye(2), atol=1e-5 + ) + results_test_pca *= np.sign(results_test_pca[0, :]) + results_test_spca *= np.sign(results_test_spca[0, :]) + assert_allclose(results_test_pca, results_test_spca) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +@pytest.mark.parametrize("n_components", [None, 3]) +def test_spca_n_components_(SPCA, n_components): + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + model = SPCA(n_components=n_components).fit(X) + + if n_components is not None: + assert model.n_components_ == n_components + else: + assert model.n_components_ == n_features + + +@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA)) +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_sparse_pca_dtype_match(SPCA, method, data_type, expected_type): + # Verify output matrix dtype + n_samples, n_features, n_components = 12, 10, 3 + rng = np.random.RandomState(0) + input_array = rng.randn(n_samples, n_features).astype(data_type) + model = SPCA(n_components=n_components, method=method) + transformed = model.fit_transform(input_array) + + assert transformed.dtype == expected_type + assert model.components_.dtype == expected_type + + +@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA)) +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_sparse_pca_numerical_consistency(SPCA, method): + # Verify numericall consistentency among np.float32 and np.float64 + rtol = 1e-3 + alpha = 2 + n_samples, n_features, n_components = 12, 10, 3 + rng = np.random.RandomState(0) + input_array = rng.randn(n_samples, n_features) + + model_32 = SPCA( + n_components=n_components, alpha=alpha, method=method, random_state=0 + ) + transformed_32 = 
model_32.fit_transform(input_array.astype(np.float32)) + + model_64 = SPCA( + n_components=n_components, alpha=alpha, method=method, random_state=0 + ) + transformed_64 = model_64.fit_transform(input_array.astype(np.float64)) + + assert_allclose(transformed_64, transformed_32, rtol=rtol) + assert_allclose(model_64.components_, model_32.components_, rtol=rtol) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +def test_spca_feature_names_out(SPCA): + """Check feature names out for *SparsePCA.""" + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + model = SPCA(n_components=4).fit(X) + names = model.get_feature_names_out() + + estimator_name = SPCA.__name__.lower() + assert_array_equal([f"{estimator_name}{i}" for i in range(4)], names) + + +# TODO(1.6): remove in 1.6 +def test_spca_max_iter_None_deprecation(): + """Check that we raise a warning for the deprecation of `max_iter=None`.""" + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed" + with pytest.warns(FutureWarning, match=warn_msg): + MiniBatchSparsePCA(max_iter=None).fit(X) + + +def test_spca_early_stopping(global_random_seed): + """Check that `tol` and `max_no_improvement` act as early stopping.""" + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 50, 10 + X = rng.randn(n_samples, n_features) + + # vary the tolerance to force the early stopping of one of the model + model_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=0.5, random_state=global_random_seed + ).fit(X) + model_not_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-3, random_state=global_random_seed + ).fit(X) + assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ + + # force the max number of no improvement to a large value to check that + # it does help to early stop + model_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed + ).fit(X) + model_not_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed + ).fit(X) + assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ + + +def test_equivalence_components_pca_spca(global_random_seed): + """Check the equivalence of the components found by PCA and SparsePCA. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23932 + """ + rng = np.random.RandomState(global_random_seed) + X = rng.randn(50, 4) + + n_components = 2 + pca = PCA( + n_components=n_components, + svd_solver="randomized", + random_state=0, + ).fit(X) + spca = SparsePCA( + n_components=n_components, + method="lars", + ridge_alpha=0, + alpha=0, + random_state=0, + ).fit(X) + + assert_allclose(pca.components_, spca.components_) + + +def test_sparse_pca_inverse_transform(): + """Check that `inverse_transform` in `SparsePCA` and `PCA` are similar.""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + X = rng.randn(n_samples, n_features) + + n_components = 2 + spca = SparsePCA( + n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0 + ) + pca = PCA(n_components=n_components, random_state=0) + X_trans_spca = spca.fit_transform(X) + X_trans_pca = pca.fit_transform(X) + assert_allclose( + spca.inverse_transform(X_trans_spca), pca.inverse_transform(X_trans_pca) + ) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +def test_transform_inverse_transform_round_trip(SPCA): + """Check the `transform` and `inverse_transform` round trip with no loss of + information. + """ + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + X = rng.randn(n_samples, n_features) + + n_components = n_features + spca = SPCA( + n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0 + ) + X_trans_spca = spca.fit_transform(X) + assert_allclose(spca.inverse_transform(X_trans_spca), X) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5eee5a102ccc303f1481a99c9be5b67b1c5344e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db27ed03b18ba09a03fb94999549f35f5d397a96 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cec2e89403aecbec85a93cd7dd978261825224fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdc4c09d0a6d68dec0edf5244724ba56b5769e78 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_ranking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_regression.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_regression.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..45c9f83536ead282778a665da8f3cf9b6b11f087 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_regression.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2f147627ea88ef2df637086c0d5349934ebd8c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/_scorer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4247c81b420f2081ce4a92ecbc620b4c01068a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/__pycache__/pairwise.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f5e5a9d6474dd818ab64520698a8836d71e85aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_bicluster.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0409f12b8d9d3c5837819828db50331d1790b5e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_bicluster.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2007ba24b372cc8e0b97c71cdcccf1f157d867b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_supervised.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_unsupervised.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_unsupervised.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b12e0e746a18d9296c90b34e05d804bf782bd7ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__pycache__/_unsupervised.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_supervised.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_supervised.py new file mode 100644 index 0000000000000000000000000000000000000000..1c8f5b800180f0a14ff05516dbaffb71a61cb881 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_supervised.py @@ -0,0 +1,1298 @@ +"""Utilities to evaluate the clustering performance of models. + +Functions named as *_score return a scalar value to maximize: the higher the +better. 
+""" + +# Authors: Olivier Grisel +# Wei LI +# Diego Molla +# Arnaud Fouchet +# Thierry Guillemot +# Gregory Stupp +# Joel Nothman +# Arya McCarthy +# Uwe F Mayer +# License: BSD 3 clause + + +import warnings +from math import log +from numbers import Real + +import numpy as np +from scipy import sparse as sp + +from ...utils._param_validation import Interval, StrOptions, validate_params +from ...utils.multiclass import type_of_target +from ...utils.validation import check_array, check_consistent_length +from ._expected_mutual_info_fast import expected_mutual_information + + +def check_clusterings(labels_true, labels_pred): + """Check that the labels arrays are 1D and of same dimension. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,) + The true labels. + + labels_pred : array-like of shape (n_samples,) + The predicted labels. + """ + labels_true = check_array( + labels_true, + ensure_2d=False, + ensure_min_samples=0, + dtype=None, + ) + + labels_pred = check_array( + labels_pred, + ensure_2d=False, + ensure_min_samples=0, + dtype=None, + ) + + type_label = type_of_target(labels_true) + type_pred = type_of_target(labels_pred) + + if "continuous" in (type_pred, type_label): + msg = ( + "Clustering metrics expects discrete values but received" + f" {type_label} values for label, and {type_pred} values " + "for target" + ) + warnings.warn(msg, UserWarning) + + # input checks + if labels_true.ndim != 1: + raise ValueError("labels_true must be 1D: shape is %r" % (labels_true.shape,)) + if labels_pred.ndim != 1: + raise ValueError("labels_pred must be 1D: shape is %r" % (labels_pred.shape,)) + check_consistent_length(labels_true, labels_pred) + + return labels_true, labels_pred + + +def _generalized_average(U, V, average_method): + """Return a particular mean of two numbers.""" + if average_method == "min": + return min(U, V) + elif average_method == "geometric": + return np.sqrt(U * V) + elif average_method == "arithmetic": + return np.mean([U, V]) + elif average_method == "max": + return max(U, V) + else: + raise ValueError( + "'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'" + ) + + +@validate_params( + { + "labels_true": ["array-like", None], + "labels_pred": ["array-like", None], + "eps": [Interval(Real, 0, None, closed="left"), None], + "sparse": ["boolean"], + "dtype": "no_validation", # delegate the validation to SciPy + }, + prefer_skip_nested_validation=True, +) +def contingency_matrix( + labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64 +): + """Build a contingency matrix describing the relationship between labels. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,) + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,) + Cluster labels to evaluate. + + eps : float, default=None + If a float, that value is added to all values in the contingency + matrix. This helps to stop NaN propagation. + If ``None``, nothing is adjusted. + + sparse : bool, default=False + If `True`, return a sparse CSR continency matrix. If `eps` is not + `None` and `sparse` is `True` will raise ValueError. + + .. versionadded:: 0.18 + + dtype : numeric type, default=np.int64 + Output dtype. Ignored if `eps` is not `None`. + + .. versionadded:: 0.24 + + Returns + ------- + contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred] + Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in + true class :math:`i` and in predicted class :math:`j`. 
If + ``eps is None``, the dtype of this array will be integer unless set + otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype + will be float. + Will be a ``sklearn.sparse.csr_matrix`` if ``sparse=True``. + + Examples + -------- + >>> from sklearn.metrics.cluster import contingency_matrix + >>> labels_true = [0, 0, 1, 1, 2, 2] + >>> labels_pred = [1, 0, 2, 1, 0, 2] + >>> contingency_matrix(labels_true, labels_pred) + array([[1, 1, 0], + [0, 1, 1], + [1, 0, 1]]) + """ + + if eps is not None and sparse: + raise ValueError("Cannot set 'eps' when sparse=True") + + classes, class_idx = np.unique(labels_true, return_inverse=True) + clusters, cluster_idx = np.unique(labels_pred, return_inverse=True) + n_classes = classes.shape[0] + n_clusters = clusters.shape[0] + # Using coo_matrix to accelerate simple histogram calculation, + # i.e. bins are consecutive integers + # Currently, coo_matrix is faster than histogram2d for simple cases + contingency = sp.coo_matrix( + (np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), + shape=(n_classes, n_clusters), + dtype=dtype, + ) + if sparse: + contingency = contingency.tocsr() + contingency.sum_duplicates() + else: + contingency = contingency.toarray() + if eps is not None: + # don't use += as contingency is integer + contingency = contingency + eps + return contingency + + +# clustering measures + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def pair_confusion_matrix(labels_true, labels_pred): + """Pair confusion matrix arising from two clusterings [1]_. + + The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix + between two clusterings by considering all pairs of samples and counting + pairs that are assigned into the same or into different clusters under + the true and predicted clusterings. + + Considering a pair of samples that is clustered together a positive pair, + then as in binary classification the count of true negatives is + :math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is + :math:`C_{11}` and false positives is :math:`C_{01}`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,), dtype=integral + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,), dtype=integral + Cluster labels to evaluate. + + Returns + ------- + C : ndarray of shape (2, 2), dtype=np.int64 + The contingency matrix. + + See Also + -------- + sklearn.metrics.rand_score : Rand Score. + sklearn.metrics.adjusted_rand_score : Adjusted Rand Score. + sklearn.metrics.adjusted_mutual_info_score : Adjusted Mutual Information. + + References + ---------- + .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions." + Journal of Classification 2, 193–218 (1985). + <10.1007/BF01908075>` + + Examples + -------- + Perfectly matching labelings have all non-zero entries on the + diagonal regardless of actual label values: + + >>> from sklearn.metrics.cluster import pair_confusion_matrix + >>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0]) + array([[8, 0], + [0, 4]]... + + Labelings that assign all classes members to the same clusters + are complete but may be not always pure, hence penalized, and + have some off-diagonal non-zero entries: + + >>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1]) + array([[8, 2], + [0, 2]]... + + Note that the matrix is not symmetric. 
+ """ + labels_true, labels_pred = check_clusterings(labels_true, labels_pred) + n_samples = np.int64(labels_true.shape[0]) + + # Computation using the contingency data + contingency = contingency_matrix( + labels_true, labels_pred, sparse=True, dtype=np.int64 + ) + n_c = np.ravel(contingency.sum(axis=1)) + n_k = np.ravel(contingency.sum(axis=0)) + sum_squares = (contingency.data**2).sum() + C = np.empty((2, 2), dtype=np.int64) + C[1, 1] = sum_squares - n_samples + C[0, 1] = contingency.dot(n_k).sum() - sum_squares + C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares + C[0, 0] = n_samples**2 - C[0, 1] - C[1, 0] - sum_squares + return C + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def rand_score(labels_true, labels_pred): + """Rand index. + + The Rand Index computes a similarity measure between two clusterings + by considering all pairs of samples and counting pairs that are + assigned in the same or different clusters in the predicted and + true clusterings [1]_ [2]_. + + The raw RI score [3]_ is: + + RI = (number of agreeing pairs) / (number of pairs) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,), dtype=integral + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,), dtype=integral + Cluster labels to evaluate. + + Returns + ------- + RI : float + Similarity score between 0.0 and 1.0, inclusive, 1.0 stands for + perfect match. + + See Also + -------- + adjusted_rand_score: Adjusted Rand Score. + adjusted_mutual_info_score: Adjusted Mutual Information. + + References + ---------- + .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions." + Journal of Classification 2, 193–218 (1985). + <10.1007/BF01908075>`. + + .. [2] `Wikipedia: Simple Matching Coefficient + `_ + + .. [3] `Wikipedia: Rand Index `_ + + Examples + -------- + Perfectly matching labelings have a score of 1 even + + >>> from sklearn.metrics.cluster import rand_score + >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0]) + 1.0 + + Labelings that assign all classes members to the same clusters + are complete but may not always be pure, hence penalized: + + >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1]) + 0.83... + """ + contingency = pair_confusion_matrix(labels_true, labels_pred) + numerator = contingency.diagonal().sum() + denominator = contingency.sum() + + if numerator == denominator or denominator == 0: + # Special limit cases: no clustering since the data is not split; + # or trivial clustering where each document is assigned a unique + # cluster. These are perfect matches hence return 1.0. + return 1.0 + + return numerator / denominator + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def adjusted_rand_score(labels_true, labels_pred): + """Rand index adjusted for chance. + + The Rand Index computes a similarity measure between two clusterings + by considering all pairs of samples and counting pairs that are + assigned in the same or different clusters in the predicted and + true clusterings. 
+ + The raw RI score is then "adjusted for chance" into the ARI score + using the following scheme:: + + ARI = (RI - Expected_RI) / (max(RI) - Expected_RI) + + The adjusted Rand index is thus ensured to have a value close to + 0.0 for random labeling independently of the number of clusters and + samples and exactly 1.0 when the clusterings are identical (up to + a permutation). The adjusted Rand index is bounded below by -0.5 for + especially discordant clusterings. + + ARI is a symmetric measure:: + + adjusted_rand_score(a, b) == adjusted_rand_score(b, a) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,), dtype=int + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,), dtype=int + Cluster labels to evaluate. + + Returns + ------- + ARI : float + Similarity score between -0.5 and 1.0. Random labelings have an ARI + close to 0.0. 1.0 stands for perfect match. + + See Also + -------- + adjusted_mutual_info_score : Adjusted Mutual Information. + + References + ---------- + .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions, + Journal of Classification 1985 + https://link.springer.com/article/10.1007%2FBF01908075 + + .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie + adjusted Rand index, Psychological Methods 2004 + + .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index + + .. [Chacon] :doi:`Minimum adjusted Rand index for two clusterings of a given size, + 2022, J. E. Chacón and A. I. Rastrojo <10.1007/s11634-022-00491-w>` + + Examples + -------- + Perfectly matching labelings have a score of 1 even + + >>> from sklearn.metrics.cluster import adjusted_rand_score + >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1]) + 1.0 + >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0]) + 1.0 + + Labelings that assign all classes members to the same clusters + are complete but may not always be pure, hence penalized:: + + >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) + 0.57... + + ARI is symmetric, so labelings that have pure clusters with members + coming from the same classes but unnecessary splits are penalized:: + + >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) + 0.57... + + If classes members are completely split across different clusters, the + assignment is totally incomplete, hence the ARI is very low:: + + >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3]) + 0.0 + + ARI may take a negative value for especially discordant labelings that + are a worse choice than the expected value of random labels:: + + >>> adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1]) + -0.5 + """ + (tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred) + # convert to Python integer types, to avoid overflow or underflow + tn, fp, fn, tp = int(tn), int(fp), int(fn), int(tp) + + # Special cases: empty data or full agreement + if fn == 0 and fp == 0: + return 1.0 + + return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn)) + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + "beta": [Interval(Real, 0, None, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0): + """Compute the homogeneity and completeness and V-Measure scores at once. 
+ + Those metrics are based on normalized conditional entropy measures of + the clustering labeling to evaluate given the knowledge of a Ground + Truth class labels of the same samples. + + A clustering result satisfies homogeneity if all of its clusters + contain only data points which are members of a single class. + + A clustering result satisfies completeness if all the data points + that are members of a given class are elements of the same cluster. + + Both scores have positive values between 0.0 and 1.0, larger values + being desirable. + + Those 3 metrics are independent of the absolute values of the labels: + a permutation of the class or cluster label values won't change the + score values in any way. + + V-Measure is furthermore symmetric: swapping ``labels_true`` and + ``label_pred`` will give the same score. This does not hold for + homogeneity and completeness. V-Measure is identical to + :func:`normalized_mutual_info_score` with the arithmetic averaging + method. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,) + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,) + Gluster labels to evaluate. + + beta : float, default=1.0 + Ratio of weight attributed to ``homogeneity`` vs ``completeness``. + If ``beta`` is greater than 1, ``completeness`` is weighted more + strongly in the calculation. If ``beta`` is less than 1, + ``homogeneity`` is weighted more strongly. + + Returns + ------- + homogeneity : float + Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling. + + completeness : float + Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling. + + v_measure : float + Harmonic mean of the first two. + + See Also + -------- + homogeneity_score : Homogeneity metric of cluster labeling. + completeness_score : Completeness metric of cluster labeling. + v_measure_score : V-Measure (NMI with arithmetic mean option). + + Examples + -------- + >>> from sklearn.metrics import homogeneity_completeness_v_measure + >>> y_true, y_pred = [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 2, 2] + >>> homogeneity_completeness_v_measure(y_true, y_pred) + (0.71..., 0.77..., 0.73...) + """ + labels_true, labels_pred = check_clusterings(labels_true, labels_pred) + + if len(labels_true) == 0: + return 1.0, 1.0, 1.0 + + entropy_C = entropy(labels_true) + entropy_K = entropy(labels_pred) + + contingency = contingency_matrix(labels_true, labels_pred, sparse=True) + MI = mutual_info_score(None, None, contingency=contingency) + + homogeneity = MI / (entropy_C) if entropy_C else 1.0 + completeness = MI / (entropy_K) if entropy_K else 1.0 + + if homogeneity + completeness == 0.0: + v_measure_score = 0.0 + else: + v_measure_score = ( + (1 + beta) + * homogeneity + * completeness + / (beta * homogeneity + completeness) + ) + + return homogeneity, completeness, v_measure_score + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def homogeneity_score(labels_true, labels_pred): + """Homogeneity metric of a cluster labeling given a ground truth. + + A clustering result satisfies homogeneity if all of its clusters + contain only data points which are members of a single class. + + This metric is independent of the absolute values of the labels: + a permutation of the class or cluster label values won't change the + score value in any way. 
+ + This metric is not symmetric: switching ``label_true`` with ``label_pred`` + will return the :func:`completeness_score` which will be different in + general. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,) + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,) + Cluster labels to evaluate. + + Returns + ------- + homogeneity : float + Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling. + + See Also + -------- + completeness_score : Completeness metric of cluster labeling. + v_measure_score : V-Measure (NMI with arithmetic mean option). + + References + ---------- + + .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A + conditional entropy-based external cluster evaluation measure + `_ + + Examples + -------- + + Perfect labelings are homogeneous:: + + >>> from sklearn.metrics.cluster import homogeneity_score + >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0]) + 1.0 + + Non-perfect labelings that further split classes into more clusters can be + perfectly homogeneous:: + + >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2])) + 1.000000 + >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3])) + 1.000000 + + Clusters that include samples from different classes do not make for an + homogeneous labeling:: + + >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1])) + 0.0... + >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0])) + 0.0... + """ + return homogeneity_completeness_v_measure(labels_true, labels_pred)[0] + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def completeness_score(labels_true, labels_pred): + """Compute completeness metric of a cluster labeling given a ground truth. + + A clustering result satisfies completeness if all the data points + that are members of a given class are elements of the same cluster. + + This metric is independent of the absolute values of the labels: + a permutation of the class or cluster label values won't change the + score value in any way. + + This metric is not symmetric: switching ``label_true`` with ``label_pred`` + will return the :func:`homogeneity_score` which will be different in + general. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,) + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,) + Cluster labels to evaluate. + + Returns + ------- + completeness : float + Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling. + + See Also + -------- + homogeneity_score : Homogeneity metric of cluster labeling. + v_measure_score : V-Measure (NMI with arithmetic mean option). + + References + ---------- + + .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A + conditional entropy-based external cluster evaluation measure + `_ + + Examples + -------- + + Perfect labelings are complete:: + + >>> from sklearn.metrics.cluster import completeness_score + >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0]) + 1.0 + + Non-perfect labelings that assign all classes members to the same clusters + are still complete:: + + >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0])) + 1.0 + >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1])) + 0.999... 
+ + If classes members are split across different clusters, the + assignment cannot be complete:: + + >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1])) + 0.0 + >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3])) + 0.0 + """ + return homogeneity_completeness_v_measure(labels_true, labels_pred)[1] + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + "beta": [Interval(Real, 0, None, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def v_measure_score(labels_true, labels_pred, *, beta=1.0): + """V-measure cluster labeling given a ground truth. + + This score is identical to :func:`normalized_mutual_info_score` with + the ``'arithmetic'`` option for averaging. + + The V-measure is the harmonic mean between homogeneity and completeness:: + + v = (1 + beta) * homogeneity * completeness + / (beta * homogeneity + completeness) + + This metric is independent of the absolute values of the labels: + a permutation of the class or cluster label values won't change the + score value in any way. + + This metric is furthermore symmetric: switching ``label_true`` with + ``label_pred`` will return the same score value. This can be useful to + measure the agreement of two independent label assignments strategies + on the same dataset when the real ground truth is not known. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,) + Ground truth class labels to be used as a reference. + + labels_pred : array-like of shape (n_samples,) + Cluster labels to evaluate. + + beta : float, default=1.0 + Ratio of weight attributed to ``homogeneity`` vs ``completeness``. + If ``beta`` is greater than 1, ``completeness`` is weighted more + strongly in the calculation. If ``beta`` is less than 1, + ``homogeneity`` is weighted more strongly. + + Returns + ------- + v_measure : float + Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling. + + See Also + -------- + homogeneity_score : Homogeneity metric of cluster labeling. + completeness_score : Completeness metric of cluster labeling. + normalized_mutual_info_score : Normalized Mutual Information. + + References + ---------- + + .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A + conditional entropy-based external cluster evaluation measure + `_ + + Examples + -------- + Perfect labelings are both homogeneous and complete, hence have score 1.0:: + + >>> from sklearn.metrics.cluster import v_measure_score + >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1]) + 1.0 + >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0]) + 1.0 + + Labelings that assign all classes members to the same clusters + are complete but not homogeneous, hence penalized:: + + >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1])) + 0.8... + >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1])) + 0.66... + + Labelings that have pure clusters with members coming from the same + classes are homogeneous but un-necessary splits harm completeness + and thus penalize V-measure as well:: + + >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2])) + 0.8... + >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3])) + 0.66... + + If classes members are completely split across different clusters, + the assignment is totally incomplete, hence the V-Measure is null:: + + >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3])) + 0.0... 
+ + Clusters that include samples from totally different classes totally + destroy the homogeneity of the labeling, hence:: + + >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0])) + 0.0... + """ + return homogeneity_completeness_v_measure(labels_true, labels_pred, beta=beta)[2] + + +@validate_params( + { + "labels_true": ["array-like", None], + "labels_pred": ["array-like", None], + "contingency": ["array-like", "sparse matrix", None], + }, + prefer_skip_nested_validation=True, +) +def mutual_info_score(labels_true, labels_pred, *, contingency=None): + """Mutual Information between two clusterings. + + The Mutual Information is a measure of the similarity between two labels + of the same data. Where :math:`|U_i|` is the number of the samples + in cluster :math:`U_i` and :math:`|V_j|` is the number of the + samples in cluster :math:`V_j`, the Mutual Information + between clusterings :math:`U` and :math:`V` is given as: + + .. math:: + + MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N} + \\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|} + + This metric is independent of the absolute values of the labels: + a permutation of the class or cluster label values won't change the + score value in any way. + + This metric is furthermore symmetric: switching :math:`U` (i.e + ``label_true``) with :math:`V` (i.e. ``label_pred``) will return the + same score value. This can be useful to measure the agreement of two + independent label assignments strategies on the same dataset when the + real ground truth is not known. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,), dtype=integral + A clustering of the data into disjoint subsets, called :math:`U` in + the above formula. + + labels_pred : array-like of shape (n_samples,), dtype=integral + A clustering of the data into disjoint subsets, called :math:`V` in + the above formula. + + contingency : {array-like, sparse matrix} of shape \ + (n_classes_true, n_classes_pred), default=None + A contingency matrix given by the + :func:`~sklearn.metrics.cluster.contingency_matrix` function. If value + is ``None``, it will be computed, otherwise the given value is used, + with ``labels_true`` and ``labels_pred`` ignored. + + Returns + ------- + mi : float + Mutual information, a non-negative value, measured in nats using the + natural logarithm. + + See Also + -------- + adjusted_mutual_info_score : Adjusted against chance Mutual Information. + normalized_mutual_info_score : Normalized Mutual Information. + + Notes + ----- + The logarithm used is the natural logarithm (base-e). + + Examples + -------- + >>> from sklearn.metrics import mutual_info_score + >>> labels_true = [0, 1, 1, 0, 1, 0] + >>> labels_pred = [0, 1, 0, 0, 1, 1] + >>> mutual_info_score(labels_true, labels_pred) + 0.056... 
+ """ + if contingency is None: + labels_true, labels_pred = check_clusterings(labels_true, labels_pred) + contingency = contingency_matrix(labels_true, labels_pred, sparse=True) + else: + contingency = check_array( + contingency, + accept_sparse=["csr", "csc", "coo"], + dtype=[int, np.int32, np.int64], + ) + + if isinstance(contingency, np.ndarray): + # For an array + nzx, nzy = np.nonzero(contingency) + nz_val = contingency[nzx, nzy] + else: + # For a sparse matrix + nzx, nzy, nz_val = sp.find(contingency) + + contingency_sum = contingency.sum() + pi = np.ravel(contingency.sum(axis=1)) + pj = np.ravel(contingency.sum(axis=0)) + + # Since MI <= min(H(X), H(Y)), any labelling with zero entropy, i.e. containing a + # single cluster, implies MI = 0 + if pi.size == 1 or pj.size == 1: + return 0.0 + + log_contingency_nm = np.log(nz_val) + contingency_nm = nz_val / contingency_sum + # Don't need to calculate the full outer product, just for non-zeroes + outer = pi.take(nzx).astype(np.int64, copy=False) * pj.take(nzy).astype( + np.int64, copy=False + ) + log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum()) + mi = ( + contingency_nm * (log_contingency_nm - log(contingency_sum)) + + contingency_nm * log_outer + ) + mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi) + return np.clip(mi.sum(), 0.0, None) + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + "average_method": [StrOptions({"arithmetic", "max", "min", "geometric"})], + }, + prefer_skip_nested_validation=True, +) +def adjusted_mutual_info_score( + labels_true, labels_pred, *, average_method="arithmetic" +): + """Adjusted Mutual Information between two clusterings. + + Adjusted Mutual Information (AMI) is an adjustment of the Mutual + Information (MI) score to account for chance. It accounts for the fact that + the MI is generally higher for two clusterings with a larger number of + clusters, regardless of whether there is actually more information shared. + For two clusterings :math:`U` and :math:`V`, the AMI is given as:: + + AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))] + + This metric is independent of the absolute values of the labels: + a permutation of the class or cluster label values won't change the + score value in any way. + + This metric is furthermore symmetric: switching :math:`U` (``label_true``) + with :math:`V` (``labels_pred``) will return the same score value. This can + be useful to measure the agreement of two independent label assignments + strategies on the same dataset when the real ground truth is not known. + + Be mindful that this function is an order of magnitude slower than other + metrics, such as the Adjusted Rand Index. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : int array-like of shape (n_samples,) + A clustering of the data into disjoint subsets, called :math:`U` in + the above formula. + + labels_pred : int array-like of shape (n_samples,) + A clustering of the data into disjoint subsets, called :math:`V` in + the above formula. + + average_method : {'min', 'geometric', 'arithmetic', 'max'}, default='arithmetic' + How to compute the normalizer in the denominator. + + .. versionadded:: 0.20 + + .. versionchanged:: 0.22 + The default value of ``average_method`` changed from 'max' to + 'arithmetic'. + + Returns + ------- + ami: float (upperlimited by 1.0) + The AMI returns a value of 1 when the two partitions are identical + (ie perfectly matched). 
Random partitions (independent labellings) have + an expected AMI around 0 on average hence can be negative. The value is + in adjusted nats (based on the natural logarithm). + + See Also + -------- + adjusted_rand_score : Adjusted Rand Index. + mutual_info_score : Mutual Information (not adjusted for chance). + + References + ---------- + .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for + Clusterings Comparison: Variants, Properties, Normalization and + Correction for Chance, JMLR + `_ + + .. [2] `Wikipedia entry for the Adjusted Mutual Information + `_ + + Examples + -------- + + Perfect labelings are both homogeneous and complete, hence have + score 1.0:: + + >>> from sklearn.metrics.cluster import adjusted_mutual_info_score + >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) + ... # doctest: +SKIP + 1.0 + >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0]) + ... # doctest: +SKIP + 1.0 + + If classes members are completely split across different clusters, + the assignment is totally in-complete, hence the AMI is null:: + + >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]) + ... # doctest: +SKIP + 0.0 + """ + labels_true, labels_pred = check_clusterings(labels_true, labels_pred) + n_samples = labels_true.shape[0] + classes = np.unique(labels_true) + clusters = np.unique(labels_pred) + + # Special limit cases: no clustering since the data is not split. + # It corresponds to both labellings having zero entropy. + # This is a perfect match hence return 1.0. + if ( + classes.shape[0] == clusters.shape[0] == 1 + or classes.shape[0] == clusters.shape[0] == 0 + ): + return 1.0 + + contingency = contingency_matrix(labels_true, labels_pred, sparse=True) + # Calculate the MI for the two clusterings + mi = mutual_info_score(labels_true, labels_pred, contingency=contingency) + # Calculate the expected value for the mutual information + emi = expected_mutual_information(contingency, n_samples) + # Calculate entropy for each labeling + h_true, h_pred = entropy(labels_true), entropy(labels_pred) + normalizer = _generalized_average(h_true, h_pred, average_method) + denominator = normalizer - emi + # Avoid 0.0 / 0.0 when expectation equals maximum, i.e. a perfect match. + # normalizer should always be >= emi, but because of floating-point + # representation, sometimes emi is slightly larger. Correct this + # by preserving the sign. + if denominator < 0: + denominator = min(denominator, -np.finfo("float64").eps) + else: + denominator = max(denominator, np.finfo("float64").eps) + ami = (mi - emi) / denominator + return ami + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + "average_method": [StrOptions({"arithmetic", "max", "min", "geometric"})], + }, + prefer_skip_nested_validation=True, +) +def normalized_mutual_info_score( + labels_true, labels_pred, *, average_method="arithmetic" +): + """Normalized Mutual Information between two clusterings. + + Normalized Mutual Information (NMI) is a normalization of the Mutual + Information (MI) score to scale the results between 0 (no mutual + information) and 1 (perfect correlation). In this function, mutual + information is normalized by some generalized mean of ``H(labels_true)`` + and ``H(labels_pred))``, defined by the `average_method`. + + This measure is not adjusted for chance. Therefore + :func:`adjusted_mutual_info_score` might be preferred. 
+ + This metric is independent of the absolute values of the labels: + a permutation of the class or cluster label values won't change the + score value in any way. + + This metric is furthermore symmetric: switching ``label_true`` with + ``label_pred`` will return the same score value. This can be useful to + measure the agreement of two independent label assignments strategies + on the same dataset when the real ground truth is not known. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : int array-like of shape (n_samples,) + A clustering of the data into disjoint subsets. + + labels_pred : int array-like of shape (n_samples,) + A clustering of the data into disjoint subsets. + + average_method : {'min', 'geometric', 'arithmetic', 'max'}, default='arithmetic' + How to compute the normalizer in the denominator. + + .. versionadded:: 0.20 + + .. versionchanged:: 0.22 + The default value of ``average_method`` changed from 'geometric' to + 'arithmetic'. + + Returns + ------- + nmi : float + Score between 0.0 and 1.0 in normalized nats (based on the natural + logarithm). 1.0 stands for perfectly complete labeling. + + See Also + -------- + v_measure_score : V-Measure (NMI with arithmetic mean option). + adjusted_rand_score : Adjusted Rand Index. + adjusted_mutual_info_score : Adjusted Mutual Information (adjusted + against chance). + + Examples + -------- + + Perfect labelings are both homogeneous and complete, hence have + score 1.0:: + + >>> from sklearn.metrics.cluster import normalized_mutual_info_score + >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) + ... # doctest: +SKIP + 1.0 + >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0]) + ... # doctest: +SKIP + 1.0 + + If classes members are completely split across different clusters, + the assignment is totally in-complete, hence the NMI is null:: + + >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]) + ... # doctest: +SKIP + 0.0 + """ + labels_true, labels_pred = check_clusterings(labels_true, labels_pred) + classes = np.unique(labels_true) + clusters = np.unique(labels_pred) + + # Special limit cases: no clustering since the data is not split. + # It corresponds to both labellings having zero entropy. + # This is a perfect match hence return 1.0. + if ( + classes.shape[0] == clusters.shape[0] == 1 + or classes.shape[0] == clusters.shape[0] == 0 + ): + return 1.0 + + contingency = contingency_matrix(labels_true, labels_pred, sparse=True) + contingency = contingency.astype(np.float64, copy=False) + # Calculate the MI for the two clusterings + mi = mutual_info_score(labels_true, labels_pred, contingency=contingency) + + # At this point mi = 0 can't be a perfect match (the special case of a single + # cluster has been dealt with before). Hence, if mi = 0, the nmi must be 0 whatever + # the normalization. + if mi == 0: + return 0.0 + + # Calculate entropy for each labeling + h_true, h_pred = entropy(labels_true), entropy(labels_pred) + + normalizer = _generalized_average(h_true, h_pred, average_method) + return mi / normalizer + + +@validate_params( + { + "labels_true": ["array-like"], + "labels_pred": ["array-like"], + "sparse": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False): + """Measure the similarity of two clusterings of a set of points. + + .. 
versionadded:: 0.18 + + The Fowlkes-Mallows index (FMI) is defined as the geometric mean between of + the precision and recall:: + + FMI = TP / sqrt((TP + FP) * (TP + FN)) + + Where ``TP`` is the number of **True Positive** (i.e. the number of pair of + points that belongs in the same clusters in both ``labels_true`` and + ``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the + number of pair of points that belongs in the same clusters in + ``labels_true`` and not in ``labels_pred``) and ``FN`` is the number of + **False Negative** (i.e. the number of pair of points that belongs in the + same clusters in ``labels_pred`` and not in ``labels_True``). + + The score ranges from 0 to 1. A high value indicates a good similarity + between two clusters. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + labels_true : array-like of shape (n_samples,), dtype=int + A clustering of the data into disjoint subsets. + + labels_pred : array-like of shape (n_samples,), dtype=int + A clustering of the data into disjoint subsets. + + sparse : bool, default=False + Compute contingency matrix internally with sparse matrix. + + Returns + ------- + score : float + The resulting Fowlkes-Mallows score. + + References + ---------- + .. [1] `E. B. Fowkles and C. L. Mallows, 1983. "A method for comparing two + hierarchical clusterings". Journal of the American Statistical + Association + `_ + + .. [2] `Wikipedia entry for the Fowlkes-Mallows Index + `_ + + Examples + -------- + + Perfect labelings are both homogeneous and complete, hence have + score 1.0:: + + >>> from sklearn.metrics.cluster import fowlkes_mallows_score + >>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1]) + 1.0 + >>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0]) + 1.0 + + If classes members are completely split across different clusters, + the assignment is totally random, hence the FMI is null:: + + >>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3]) + 0.0 + """ + labels_true, labels_pred = check_clusterings(labels_true, labels_pred) + (n_samples,) = labels_true.shape + + c = contingency_matrix(labels_true, labels_pred, sparse=True) + c = c.astype(np.int64, copy=False) + tk = np.dot(c.data, c.data) - n_samples + pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples + qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples + return np.sqrt(tk / pk) * np.sqrt(tk / qk) if tk != 0.0 else 0.0 + + +@validate_params( + { + "labels": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def entropy(labels): + """Calculate the entropy for a labeling. + + Parameters + ---------- + labels : array-like of shape (n_samples,), dtype=int + The labels. + + Returns + ------- + entropy : float + The entropy for a labeling. + + Notes + ----- + The logarithm used is the natural logarithm (base-e). 
+ """ + if len(labels) == 0: + return 1.0 + label_idx = np.unique(labels, return_inverse=True)[1] + pi = np.bincount(label_idx).astype(np.float64) + pi = pi[pi > 0] + + # single cluster => zero entropy + if pi.size == 1: + return 0.0 + + pi_sum = np.sum(pi) + # log(a / b) should be calculated as log(a) - log(b) for + # possible loss of precision + return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum))) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aa7012fba96d198110ad286acd1b5730162de2f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d065435a2891301573462298bad148e6a14efe68 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_bicluster.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76fbfaff4a63c65830fcb81f11c7642edd4ac3e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_supervised.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_supervised.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..019dce78e6ce59bb6702e2f26c44346a56cb62d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_supervised.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_unsupervised.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_unsupervised.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77927fcff3516d77763195b00cdbbf5f5d693d32 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/__pycache__/test_unsupervised.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_bicluster.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_bicluster.py new file mode 100644 index 0000000000000000000000000000000000000000..53f7805100a1313709d1d8868d45071b3066f836 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_bicluster.py @@ -0,0 +1,56 @@ +"""Testing for bicluster metrics module""" + +import numpy as np 
+ +from sklearn.metrics import consensus_score +from sklearn.metrics.cluster._bicluster import _jaccard +from sklearn.utils._testing import assert_almost_equal + + +def test_jaccard(): + a1 = np.array([True, True, False, False]) + a2 = np.array([True, True, True, True]) + a3 = np.array([False, True, True, False]) + a4 = np.array([False, False, True, True]) + + assert _jaccard(a1, a1, a1, a1) == 1 + assert _jaccard(a1, a1, a2, a2) == 0.25 + assert _jaccard(a1, a1, a3, a3) == 1.0 / 7 + assert _jaccard(a1, a1, a4, a4) == 0 + + +def test_consensus_score(): + a = [[True, True, False, False], [False, False, True, True]] + b = a[::-1] + + assert consensus_score((a, a), (a, a)) == 1 + assert consensus_score((a, a), (b, b)) == 1 + assert consensus_score((a, b), (a, b)) == 1 + assert consensus_score((a, b), (b, a)) == 1 + + assert consensus_score((a, a), (b, a)) == 0 + assert consensus_score((a, a), (a, b)) == 0 + assert consensus_score((b, b), (a, b)) == 0 + assert consensus_score((b, b), (b, a)) == 0 + + +def test_consensus_score_issue2445(): + """Different number of biclusters in A and B""" + a_rows = np.array( + [ + [True, True, False, False], + [False, False, True, True], + [False, False, False, True], + ] + ) + a_cols = np.array( + [ + [True, True, False, False], + [False, False, True, True], + [False, False, False, True], + ] + ) + idx = [0, 2] + s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx])) + # B contains 2 of the 3 biclusters in A, so score should be 2/3 + assert_almost_equal(s, 2.0 / 3.0) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_common.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..bc32b7df7f561f086aa8096ed285968c2c8d09ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_common.py @@ -0,0 +1,219 @@ +from functools import partial +from itertools import chain + +import numpy as np +import pytest + +from sklearn.metrics.cluster import ( + adjusted_mutual_info_score, + adjusted_rand_score, + calinski_harabasz_score, + completeness_score, + davies_bouldin_score, + fowlkes_mallows_score, + homogeneity_score, + mutual_info_score, + normalized_mutual_info_score, + rand_score, + silhouette_score, + v_measure_score, +) +from sklearn.utils._testing import assert_allclose + +# Dictionaries of metrics +# ------------------------ +# The goal of having those dictionaries is to have an easy way to call a +# particular metric and associate a name to each function: +# - SUPERVISED_METRICS: all supervised cluster metrics - (when given a +# ground truth value) +# - UNSUPERVISED_METRICS: all unsupervised cluster metrics +# +# Those dictionaries will be used to test systematically some invariance +# properties, e.g. invariance toward several input layout. 
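+#
+# A minimal sketch of the lookup-by-name pattern used by the tests below
+# (assuming the y1/y2 fixtures defined later in this module):
+#
+#     @pytest.mark.parametrize("metric_name", SYMMETRIC_METRICS)
+#     def test_symmetry_sketch(metric_name):
+#         metric = SUPERVISED_METRICS[metric_name]
+#         assert metric(y1, y2) == pytest.approx(metric(y2, y1))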
+# + +SUPERVISED_METRICS = { + "adjusted_mutual_info_score": adjusted_mutual_info_score, + "adjusted_rand_score": adjusted_rand_score, + "rand_score": rand_score, + "completeness_score": completeness_score, + "homogeneity_score": homogeneity_score, + "mutual_info_score": mutual_info_score, + "normalized_mutual_info_score": normalized_mutual_info_score, + "v_measure_score": v_measure_score, + "fowlkes_mallows_score": fowlkes_mallows_score, +} + +UNSUPERVISED_METRICS = { + "silhouette_score": silhouette_score, + "silhouette_manhattan": partial(silhouette_score, metric="manhattan"), + "calinski_harabasz_score": calinski_harabasz_score, + "davies_bouldin_score": davies_bouldin_score, +} + +# Lists of metrics with common properties +# --------------------------------------- +# Lists of metrics with common properties are used to test systematically some +# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics +# that are symmetric with respect to their input argument y_true and y_pred. +# +# -------------------------------------------------------------------- +# Symmetric with respect to their input arguments y_true and y_pred. +# Symmetric metrics only apply to supervised clusters. +SYMMETRIC_METRICS = [ + "adjusted_rand_score", + "rand_score", + "v_measure_score", + "mutual_info_score", + "adjusted_mutual_info_score", + "normalized_mutual_info_score", + "fowlkes_mallows_score", +] + +NON_SYMMETRIC_METRICS = ["homogeneity_score", "completeness_score"] + +# Metrics whose upper bound is 1 +NORMALIZED_METRICS = [ + "adjusted_rand_score", + "rand_score", + "homogeneity_score", + "completeness_score", + "v_measure_score", + "adjusted_mutual_info_score", + "fowlkes_mallows_score", + "normalized_mutual_info_score", +] + + +rng = np.random.RandomState(0) +y1 = rng.randint(3, size=30) +y2 = rng.randint(3, size=30) + + +def test_symmetric_non_symmetric_union(): + assert sorted(SYMMETRIC_METRICS + NON_SYMMETRIC_METRICS) == sorted( + SUPERVISED_METRICS + ) + + +# 0.22 AMI and NMI changes +@pytest.mark.filterwarnings("ignore::FutureWarning") +@pytest.mark.parametrize( + "metric_name, y1, y2", [(name, y1, y2) for name in SYMMETRIC_METRICS] +) +def test_symmetry(metric_name, y1, y2): + metric = SUPERVISED_METRICS[metric_name] + assert metric(y1, y2) == pytest.approx(metric(y2, y1)) + + +@pytest.mark.parametrize( + "metric_name, y1, y2", [(name, y1, y2) for name in NON_SYMMETRIC_METRICS] +) +def test_non_symmetry(metric_name, y1, y2): + metric = SUPERVISED_METRICS[metric_name] + assert metric(y1, y2) != pytest.approx(metric(y2, y1)) + + +# 0.22 AMI and NMI changes +@pytest.mark.filterwarnings("ignore::FutureWarning") +@pytest.mark.parametrize("metric_name", NORMALIZED_METRICS) +def test_normalized_output(metric_name): + upper_bound_1 = [0, 0, 0, 1, 1, 1] + upper_bound_2 = [0, 0, 0, 1, 1, 1] + metric = SUPERVISED_METRICS[metric_name] + assert metric([0, 0, 0, 1, 1], [0, 0, 0, 1, 2]) > 0.0 + assert metric([0, 0, 1, 1, 2], [0, 0, 1, 1, 1]) > 0.0 + assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0 + assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0 + assert metric(upper_bound_1, upper_bound_2) == pytest.approx(1.0) + + lower_bound_1 = [0, 0, 0, 0, 0, 0] + lower_bound_2 = [0, 1, 2, 3, 4, 5] + score = np.array( + [metric(lower_bound_1, lower_bound_2), metric(lower_bound_2, lower_bound_1)] + ) + assert not (score < 0).any() + + +# 0.22 AMI and NMI changes +@pytest.mark.filterwarnings("ignore::FutureWarning") +@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, 
UNSUPERVISED_METRICS)) +def test_permute_labels(metric_name): + # All clustering metrics do not change score due to permutations of labels + # that is when 0 and 1 exchanged. + y_label = np.array([0, 0, 0, 1, 1, 0, 1]) + y_pred = np.array([1, 0, 1, 0, 1, 1, 0]) + if metric_name in SUPERVISED_METRICS: + metric = SUPERVISED_METRICS[metric_name] + score_1 = metric(y_pred, y_label) + assert_allclose(score_1, metric(1 - y_pred, y_label)) + assert_allclose(score_1, metric(1 - y_pred, 1 - y_label)) + assert_allclose(score_1, metric(y_pred, 1 - y_label)) + else: + metric = UNSUPERVISED_METRICS[metric_name] + X = np.random.randint(10, size=(7, 10)) + score_1 = metric(X, y_pred) + assert_allclose(score_1, metric(X, 1 - y_pred)) + + +# 0.22 AMI and NMI changes +@pytest.mark.filterwarnings("ignore::FutureWarning") +@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS)) +# For all clustering metrics Input parameters can be both +# in the form of arrays lists, positive, negative or string +def test_format_invariance(metric_name): + y_true = [0, 0, 0, 0, 1, 1, 1, 1] + y_pred = [0, 1, 2, 3, 4, 5, 6, 7] + + def generate_formats(y): + y = np.array(y) + yield y, "array of ints" + yield y.tolist(), "list of ints" + yield [str(x) + "-a" for x in y.tolist()], "list of strs" + yield ( + np.array([str(x) + "-a" for x in y.tolist()], dtype=object), + "array of strs", + ) + yield y - 1, "including negative ints" + yield y + 1, "strictly positive ints" + + if metric_name in SUPERVISED_METRICS: + metric = SUPERVISED_METRICS[metric_name] + score_1 = metric(y_true, y_pred) + y_true_gen = generate_formats(y_true) + y_pred_gen = generate_formats(y_pred) + for (y_true_fmt, fmt_name), (y_pred_fmt, _) in zip(y_true_gen, y_pred_gen): + assert score_1 == metric(y_true_fmt, y_pred_fmt) + else: + metric = UNSUPERVISED_METRICS[metric_name] + X = np.random.randint(10, size=(8, 10)) + score_1 = metric(X, y_true) + assert score_1 == metric(X.astype(float), y_true) + y_true_gen = generate_formats(y_true) + for y_true_fmt, fmt_name in y_true_gen: + assert score_1 == metric(X, y_true_fmt) + + +@pytest.mark.parametrize("metric", SUPERVISED_METRICS.values()) +def test_single_sample(metric): + # only the supervised metrics support single sample + for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]: + metric([i], [j]) + + +@pytest.mark.parametrize( + "metric_name, metric_func", dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS).items() +) +def test_inf_nan_input(metric_name, metric_func): + if metric_name in SUPERVISED_METRICS: + invalids = [ + ([0, 1], [np.inf, np.inf]), + ([0, 1], [np.nan, np.nan]), + ([0, 1], [np.nan, np.inf]), + ] + else: + X = np.random.randint(10, size=(2, 10)) + invalids = [(X, [np.inf, np.inf]), (X, [np.nan, np.nan]), (X, [np.nan, np.inf])] + with pytest.raises(ValueError, match=r"contains (NaN|infinity)"): + for args in invalids: + metric_func(*args) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_supervised.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_supervised.py new file mode 100644 index 0000000000000000000000000000000000000000..dfaa58ff62c018f40be4a069b4fad47e9c9aa396 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_supervised.py @@ -0,0 +1,482 @@ +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal + +from sklearn.metrics.cluster import ( + adjusted_mutual_info_score, + adjusted_rand_score, 
+ completeness_score, + contingency_matrix, + entropy, + expected_mutual_information, + fowlkes_mallows_score, + homogeneity_completeness_v_measure, + homogeneity_score, + mutual_info_score, + normalized_mutual_info_score, + pair_confusion_matrix, + rand_score, + v_measure_score, +) +from sklearn.metrics.cluster._supervised import _generalized_average, check_clusterings +from sklearn.utils import assert_all_finite +from sklearn.utils._testing import assert_almost_equal + +score_funcs = [ + adjusted_rand_score, + rand_score, + homogeneity_score, + completeness_score, + v_measure_score, + adjusted_mutual_info_score, + normalized_mutual_info_score, +] + + +def test_error_messages_on_wrong_input(): + for score_func in score_funcs: + expected = ( + r"Found input variables with inconsistent numbers " r"of samples: \[2, 3\]" + ) + with pytest.raises(ValueError, match=expected): + score_func([0, 1], [1, 1, 1]) + + expected = r"labels_true must be 1D: shape is \(2" + with pytest.raises(ValueError, match=expected): + score_func([[0, 1], [1, 0]], [1, 1, 1]) + + expected = r"labels_pred must be 1D: shape is \(2" + with pytest.raises(ValueError, match=expected): + score_func([0, 1, 0], [[1, 1], [0, 0]]) + + +def test_generalized_average(): + a, b = 1, 2 + methods = ["min", "geometric", "arithmetic", "max"] + means = [_generalized_average(a, b, method) for method in methods] + assert means[0] <= means[1] <= means[2] <= means[3] + c, d = 12, 12 + means = [_generalized_average(c, d, method) for method in methods] + assert means[0] == means[1] == means[2] == means[3] + + +def test_perfect_matches(): + for score_func in score_funcs: + assert score_func([], []) == pytest.approx(1.0) + assert score_func([0], [1]) == pytest.approx(1.0) + assert score_func([0, 0, 0], [0, 0, 0]) == pytest.approx(1.0) + assert score_func([0, 1, 0], [42, 7, 42]) == pytest.approx(1.0) + assert score_func([0.0, 1.0, 0.0], [42.0, 7.0, 42.0]) == pytest.approx(1.0) + assert score_func([0.0, 1.0, 2.0], [42.0, 7.0, 2.0]) == pytest.approx(1.0) + assert score_func([0, 1, 2], [42, 7, 2]) == pytest.approx(1.0) + score_funcs_with_changing_means = [ + normalized_mutual_info_score, + adjusted_mutual_info_score, + ] + means = {"min", "geometric", "arithmetic", "max"} + for score_func in score_funcs_with_changing_means: + for mean in means: + assert score_func([], [], average_method=mean) == pytest.approx(1.0) + assert score_func([0], [1], average_method=mean) == pytest.approx(1.0) + assert score_func( + [0, 0, 0], [0, 0, 0], average_method=mean + ) == pytest.approx(1.0) + assert score_func( + [0, 1, 0], [42, 7, 42], average_method=mean + ) == pytest.approx(1.0) + assert score_func( + [0.0, 1.0, 0.0], [42.0, 7.0, 42.0], average_method=mean + ) == pytest.approx(1.0) + assert score_func( + [0.0, 1.0, 2.0], [42.0, 7.0, 2.0], average_method=mean + ) == pytest.approx(1.0) + assert score_func( + [0, 1, 2], [42, 7, 2], average_method=mean + ) == pytest.approx(1.0) + + +def test_homogeneous_but_not_complete_labeling(): + # homogeneous but not complete clustering + h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 2, 2]) + assert_almost_equal(h, 1.00, 2) + assert_almost_equal(c, 0.69, 2) + assert_almost_equal(v, 0.81, 2) + + +def test_complete_but_not_homogeneous_labeling(): + # complete but not homogeneous clustering + h, c, v = homogeneity_completeness_v_measure([0, 0, 1, 1, 2, 2], [0, 0, 1, 1, 1, 1]) + assert_almost_equal(h, 0.58, 2) + assert_almost_equal(c, 1.00, 2) + assert_almost_equal(v, 0.73, 2) + + +def 
test_not_complete_and_not_homogeneous_labeling(): + # neither complete nor homogeneous but not so bad either + h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2]) + assert_almost_equal(h, 0.67, 2) + assert_almost_equal(c, 0.42, 2) + assert_almost_equal(v, 0.52, 2) + + +def test_beta_parameter(): + # test for when beta passed to + # homogeneity_completeness_v_measure + # and v_measure_score + beta_test = 0.2 + h_test = 0.67 + c_test = 0.42 + v_test = (1 + beta_test) * h_test * c_test / (beta_test * h_test + c_test) + + h, c, v = homogeneity_completeness_v_measure( + [0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2], beta=beta_test + ) + assert_almost_equal(h, h_test, 2) + assert_almost_equal(c, c_test, 2) + assert_almost_equal(v, v_test, 2) + + v = v_measure_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2], beta=beta_test) + assert_almost_equal(v, v_test, 2) + + +def test_non_consecutive_labels(): + # regression tests for labels with gaps + h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 2, 2, 2], [0, 1, 0, 1, 2, 2]) + assert_almost_equal(h, 0.67, 2) + assert_almost_equal(c, 0.42, 2) + assert_almost_equal(v, 0.52, 2) + + h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2]) + assert_almost_equal(h, 0.67, 2) + assert_almost_equal(c, 0.42, 2) + assert_almost_equal(v, 0.52, 2) + + ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2]) + ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2]) + assert_almost_equal(ari_1, 0.24, 2) + assert_almost_equal(ari_2, 0.24, 2) + + ri_1 = rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2]) + ri_2 = rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2]) + assert_almost_equal(ri_1, 0.66, 2) + assert_almost_equal(ri_2, 0.66, 2) + + +def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10, seed=42): + # Compute score for random uniform cluster labelings + random_labels = np.random.RandomState(seed).randint + scores = np.zeros((len(k_range), n_runs)) + for i, k in enumerate(k_range): + for j in range(n_runs): + labels_a = random_labels(low=0, high=k, size=n_samples) + labels_b = random_labels(low=0, high=k, size=n_samples) + scores[i, j] = score_func(labels_a, labels_b) + return scores + + +def test_adjustment_for_chance(): + # Check that adjusted scores are almost zero on random labels + n_clusters_range = [2, 10, 50, 90] + n_samples = 100 + n_runs = 10 + + scores = uniform_labelings_scores( + adjusted_rand_score, n_samples, n_clusters_range, n_runs + ) + + max_abs_scores = np.abs(scores).max(axis=1) + assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2) + + +def test_adjusted_mutual_info_score(): + # Compute the Adjusted Mutual Information and test against known values + labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]) + labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2]) + # Mutual information + mi = mutual_info_score(labels_a, labels_b) + assert_almost_equal(mi, 0.41022, 5) + # with provided sparse contingency + C = contingency_matrix(labels_a, labels_b, sparse=True) + mi = mutual_info_score(labels_a, labels_b, contingency=C) + assert_almost_equal(mi, 0.41022, 5) + # with provided dense contingency + C = contingency_matrix(labels_a, labels_b) + mi = mutual_info_score(labels_a, labels_b, contingency=C) + assert_almost_equal(mi, 0.41022, 5) + # Expected mutual information + n_samples = C.sum() + emi = expected_mutual_information(C, n_samples) + assert_almost_equal(emi, 0.15042, 5) + # Adjusted mutual 
information + ami = adjusted_mutual_info_score(labels_a, labels_b) + assert_almost_equal(ami, 0.27821, 5) + ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3]) + assert ami == pytest.approx(1.0) + # Test with a very large array + a110 = np.array([list(labels_a) * 110]).flatten() + b110 = np.array([list(labels_b) * 110]).flatten() + ami = adjusted_mutual_info_score(a110, b110) + assert_almost_equal(ami, 0.38, 2) + + +def test_expected_mutual_info_overflow(): + # Test for regression where contingency cell exceeds 2**16 + # leading to overflow in np.outer, resulting in EMI > 1 + assert expected_mutual_information(np.array([[70000]]), 70000) <= 1 + + +def test_int_overflow_mutual_info_fowlkes_mallows_score(): + # Test overflow in mutual_info_classif and fowlkes_mallows_score + x = np.array( + [1] * (52632 + 2529) + + [2] * (14660 + 793) + + [3] * (3271 + 204) + + [4] * (814 + 39) + + [5] * (316 + 20) + ) + y = np.array( + [0] * 52632 + + [1] * 2529 + + [0] * 14660 + + [1] * 793 + + [0] * 3271 + + [1] * 204 + + [0] * 814 + + [1] * 39 + + [0] * 316 + + [1] * 20 + ) + + assert_all_finite(mutual_info_score(x, y)) + assert_all_finite(fowlkes_mallows_score(x, y)) + + +def test_entropy(): + ent = entropy([0, 0, 42.0]) + assert_almost_equal(ent, 0.6365141, 5) + assert_almost_equal(entropy([]), 1) + assert entropy([1, 1, 1, 1]) == 0 + + +def test_contingency_matrix(): + labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]) + labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2]) + C = contingency_matrix(labels_a, labels_b) + C2 = np.histogram2d(labels_a, labels_b, bins=(np.arange(1, 5), np.arange(1, 5)))[0] + assert_array_almost_equal(C, C2) + C = contingency_matrix(labels_a, labels_b, eps=0.1) + assert_array_almost_equal(C, C2 + 0.1) + + +def test_contingency_matrix_sparse(): + labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]) + labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2]) + C = contingency_matrix(labels_a, labels_b) + C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray() + assert_array_almost_equal(C, C_sparse) + with pytest.raises(ValueError, match="Cannot set 'eps' when sparse=True"): + contingency_matrix(labels_a, labels_b, eps=1e-10, sparse=True) + + +def test_exactly_zero_info_score(): + # Check numerical stability when information is exactly zero + for i in np.logspace(1, 4, 4).astype(int): + labels_a, labels_b = (np.ones(i, dtype=int), np.arange(i, dtype=int)) + assert normalized_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0) + assert v_measure_score(labels_a, labels_b) == pytest.approx(0.0) + assert adjusted_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0) + assert normalized_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0) + for method in ["min", "geometric", "arithmetic", "max"]: + assert adjusted_mutual_info_score( + labels_a, labels_b, average_method=method + ) == pytest.approx(0.0) + assert normalized_mutual_info_score( + labels_a, labels_b, average_method=method + ) == pytest.approx(0.0) + + +def test_v_measure_and_mutual_information(seed=36): + # Check relation between v_measure, entropy and mutual information + for i in np.logspace(1, 4, 4).astype(int): + random_state = np.random.RandomState(seed) + labels_a, labels_b = ( + random_state.randint(0, 10, i), + random_state.randint(0, 10, i), + ) + assert_almost_equal( + v_measure_score(labels_a, labels_b), + 2.0 + * mutual_info_score(labels_a, labels_b) + / (entropy(labels_a) + 
entropy(labels_b)), + 0, + ) + avg = "arithmetic" + assert_almost_equal( + v_measure_score(labels_a, labels_b), + normalized_mutual_info_score(labels_a, labels_b, average_method=avg), + ) + + +def test_fowlkes_mallows_score(): + # General case + score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2]) + assert_almost_equal(score, 4.0 / np.sqrt(12.0 * 6.0)) + + # Perfect match but where the label names changed + perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0]) + assert_almost_equal(perfect_score, 1.0) + + # Worst case + worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0], [0, 1, 2, 3, 4, 5]) + assert_almost_equal(worst_score, 0.0) + + +def test_fowlkes_mallows_score_properties(): + # handcrafted example + labels_a = np.array([0, 0, 0, 1, 1, 2]) + labels_b = np.array([1, 1, 2, 2, 0, 0]) + expected = 1.0 / np.sqrt((1.0 + 3.0) * (1.0 + 2.0)) + # FMI = TP / sqrt((TP + FP) * (TP + FN)) + + score_original = fowlkes_mallows_score(labels_a, labels_b) + assert_almost_equal(score_original, expected) + + # symmetric property + score_symmetric = fowlkes_mallows_score(labels_b, labels_a) + assert_almost_equal(score_symmetric, expected) + + # permutation property + score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b) + assert_almost_equal(score_permuted, expected) + + # symmetric and permutation(both together) + score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3) + assert_almost_equal(score_both, expected) + + +@pytest.mark.parametrize( + "labels_true, labels_pred", + [ + (["a"] * 6, [1, 1, 0, 0, 1, 1]), + ([1] * 6, [1, 1, 0, 0, 1, 1]), + ([1, 1, 0, 0, 1, 1], ["a"] * 6), + ([1, 1, 0, 0, 1, 1], [1] * 6), + (["a"] * 6, ["a"] * 6), + ], +) +def test_mutual_info_score_positive_constant_label(labels_true, labels_pred): + # Check that MI = 0 when one or both labelling are constant + # non-regression test for #16355 + assert mutual_info_score(labels_true, labels_pred) == 0 + + +def test_check_clustering_error(): + # Test warning message for continuous values + rng = np.random.RandomState(42) + noise = rng.rand(500) + wavelength = np.linspace(0.01, 1, 500) * 1e-6 + msg = ( + "Clustering metrics expects discrete values but received " + "continuous values for label, and continuous values for " + "target" + ) + + with pytest.warns(UserWarning, match=msg): + check_clusterings(wavelength, noise) + + +def test_pair_confusion_matrix_fully_dispersed(): + # edge case: every element is its own cluster + N = 100 + clustering1 = list(range(N)) + clustering2 = clustering1 + expected = np.array([[N * (N - 1), 0], [0, 0]]) + assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected) + + +def test_pair_confusion_matrix_single_cluster(): + # edge case: only one cluster + N = 100 + clustering1 = np.zeros((N,)) + clustering2 = clustering1 + expected = np.array([[0, 0], [0, N * (N - 1)]]) + assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected) + + +def test_pair_confusion_matrix(): + # regular case: different non-trivial clusterings + n = 10 + N = n**2 + clustering1 = np.hstack([[i + 1] * n for i in range(n)]) + clustering2 = np.hstack([[i + 1] * (n + 1) for i in range(n)])[:N] + # basic quadratic implementation + expected = np.zeros(shape=(2, 2), dtype=np.int64) + for i in range(len(clustering1)): + for j in range(len(clustering2)): + if i != j: + same_cluster_1 = int(clustering1[i] == clustering1[j]) + same_cluster_2 = int(clustering2[i] == clustering2[j]) + expected[same_cluster_1, same_cluster_2] += 1 + 
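+    # The brute-force loop above visits the ordered pairs (i, j) and (j, i)
+    # separately, so every unordered pair of samples is counted twice; this
+    # matches pair_confusion_matrix, whose four entries sum to N * (N - 1)
+    # (cf. the fully-dispersed and single-cluster edge cases above).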
assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected) + + +@pytest.mark.parametrize( + "clustering1, clustering2", + [(list(range(100)), list(range(100))), (np.zeros((100,)), np.zeros((100,)))], +) +def test_rand_score_edge_cases(clustering1, clustering2): + # edge case 1: every element is its own cluster + # edge case 2: only one cluster + assert_allclose(rand_score(clustering1, clustering2), 1.0) + + +def test_rand_score(): + # regular case: different non-trivial clusterings + clustering1 = [0, 0, 0, 1, 1, 1] + clustering2 = [0, 1, 0, 1, 2, 2] + # pair confusion matrix + D11 = 2 * 2 # ordered pairs (1, 3), (5, 6) + D10 = 2 * 4 # ordered pairs (1, 2), (2, 3), (4, 5), (4, 6) + D01 = 2 * 1 # ordered pair (2, 4) + D00 = 5 * 6 - D11 - D01 - D10 # the remaining pairs + # rand score + expected_numerator = D00 + D11 + expected_denominator = D00 + D01 + D10 + D11 + expected = expected_numerator / expected_denominator + assert_allclose(rand_score(clustering1, clustering2), expected) + + +def test_adjusted_rand_score_overflow(): + """Check that large amount of data will not lead to overflow in + `adjusted_rand_score`. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20305 + """ + rng = np.random.RandomState(0) + y_true = rng.randint(0, 2, 100_000, dtype=np.int8) + y_pred = rng.randint(0, 2, 100_000, dtype=np.int8) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + adjusted_rand_score(y_true, y_pred) + + +@pytest.mark.parametrize("average_method", ["min", "arithmetic", "geometric", "max"]) +def test_normalized_mutual_info_score_bounded(average_method): + """Check that nmi returns a score between 0 (included) and 1 (excluded + for non-perfect match) + + Non-regression test for issue #13836 + """ + labels1 = [0] * 469 + labels2 = [1] + labels1[1:] + labels3 = [0, 1] + labels1[2:] + + # labels1 is constant. The mutual info between labels1 and any other labelling is 0. + nmi = normalized_mutual_info_score(labels1, labels2, average_method=average_method) + assert nmi == 0 + + # non constant, non perfect matching labels + nmi = normalized_mutual_info_score(labels2, labels3, average_method=average_method) + assert 0 <= nmi < 1 diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_unsupervised.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_unsupervised.py new file mode 100644 index 0000000000000000000000000000000000000000..a0420bbd406ec873022ee3a6e511c51fafd82f11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/tests/test_unsupervised.py @@ -0,0 +1,413 @@ +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy.sparse import issparse + +from sklearn import datasets +from sklearn.metrics import pairwise_distances +from sklearn.metrics.cluster import ( + calinski_harabasz_score, + davies_bouldin_score, + silhouette_samples, + silhouette_score, +) +from sklearn.metrics.cluster._unsupervised import _silhouette_reduce +from sklearn.utils._testing import assert_array_equal +from sklearn.utils.fixes import ( + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + + +@pytest.mark.parametrize( + "sparse_container", + [None] + CSR_CONTAINERS + CSC_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS, +) +@pytest.mark.parametrize("sample_size", [None, "half"]) +def test_silhouette(sparse_container, sample_size): + # Tests the Silhouette Coefficient. 
+ dataset = datasets.load_iris() + X, y = dataset.data, dataset.target + if sparse_container is not None: + X = sparse_container(X) + sample_size = int(X.shape[0] / 2) if sample_size == "half" else sample_size + + D = pairwise_distances(X, metric="euclidean") + # Given that the actual labels are used, we can assume that S would be positive. + score_precomputed = silhouette_score( + D, y, metric="precomputed", sample_size=sample_size, random_state=0 + ) + score_euclidean = silhouette_score( + X, y, metric="euclidean", sample_size=sample_size, random_state=0 + ) + assert score_precomputed > 0 + assert score_euclidean > 0 + assert score_precomputed == pytest.approx(score_euclidean) + + +def test_cluster_size_1(): + # Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster + # (cluster 0). We also test the case where there are identical samples + # as the only members of a cluster (cluster 2). To our knowledge, this case + # is not discussed in reference material, and we choose for it a sample + # score of 1. + X = [[0.0], [1.0], [1.0], [2.0], [3.0], [3.0]] + labels = np.array([0, 1, 1, 1, 2, 2]) + + # Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention + # Cluster 1: intra-cluster = [.5, .5, 1] + # inter-cluster = [1, 1, 1] + # silhouette = [.5, .5, 0] + # Cluster 2: intra-cluster = [0, 0] + # inter-cluster = [arbitrary, arbitrary] + # silhouette = [1., 1.] + + silhouette = silhouette_score(X, labels) + assert not np.isnan(silhouette) + ss = silhouette_samples(X, labels) + assert_array_equal(ss, [0, 0.5, 0.5, 0, 1, 1]) + + +def test_silhouette_paper_example(): + # Explicitly check per-sample results against Rousseeuw (1987) + # Data from Table 1 + lower = [ + 5.58, + 7.00, + 6.50, + 7.08, + 7.00, + 3.83, + 4.83, + 5.08, + 8.17, + 5.83, + 2.17, + 5.75, + 6.67, + 6.92, + 4.92, + 6.42, + 5.00, + 5.58, + 6.00, + 4.67, + 6.42, + 3.42, + 5.50, + 6.42, + 6.42, + 5.00, + 3.92, + 6.17, + 2.50, + 4.92, + 6.25, + 7.33, + 4.50, + 2.25, + 6.33, + 2.75, + 6.08, + 6.67, + 4.25, + 2.67, + 6.00, + 6.17, + 6.17, + 6.92, + 6.17, + 5.25, + 6.83, + 4.50, + 3.75, + 5.75, + 5.42, + 6.08, + 5.83, + 6.67, + 3.67, + 4.75, + 3.00, + 6.08, + 6.67, + 5.00, + 5.58, + 4.83, + 6.17, + 5.67, + 6.50, + 6.92, + ] + D = np.zeros((12, 12)) + D[np.tril_indices(12, -1)] = lower + D += D.T + + names = [ + "BEL", + "BRA", + "CHI", + "CUB", + "EGY", + "FRA", + "IND", + "ISR", + "USA", + "USS", + "YUG", + "ZAI", + ] + + # Data from Figure 2 + labels1 = [1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1] + expected1 = { + "USA": 0.43, + "BEL": 0.39, + "FRA": 0.35, + "ISR": 0.30, + "BRA": 0.22, + "EGY": 0.20, + "ZAI": 0.19, + "CUB": 0.40, + "USS": 0.34, + "CHI": 0.33, + "YUG": 0.26, + "IND": -0.04, + } + score1 = 0.28 + + # Data from Figure 3 + labels2 = [1, 2, 3, 3, 1, 1, 2, 1, 1, 3, 3, 2] + expected2 = { + "USA": 0.47, + "FRA": 0.44, + "BEL": 0.42, + "ISR": 0.37, + "EGY": 0.02, + "ZAI": 0.28, + "BRA": 0.25, + "IND": 0.17, + "CUB": 0.48, + "USS": 0.44, + "YUG": 0.31, + "CHI": 0.31, + } + score2 = 0.33 + + for labels, expected, score in [ + (labels1, expected1, score1), + (labels2, expected2, score2), + ]: + expected = [expected[name] for name in names] + # we check to 2dp because that's what's in the paper + pytest.approx( + expected, + silhouette_samples(D, np.array(labels), metric="precomputed"), + abs=1e-2, + ) + pytest.approx( + score, silhouette_score(D, np.array(labels), metric="precomputed"), abs=1e-2 + ) + + +def test_correct_labelsize(): + # Assert 1 < n_labels < n_samples + dataset = datasets.load_iris() + X = 
dataset.data + + # n_labels = n_samples + y = np.arange(X.shape[0]) + err_msg = ( + r"Number of labels is %d\. Valid values are 2 " + r"to n_samples - 1 \(inclusive\)" % len(np.unique(y)) + ) + with pytest.raises(ValueError, match=err_msg): + silhouette_score(X, y) + + # n_labels = 1 + y = np.zeros(X.shape[0]) + err_msg = ( + r"Number of labels is %d\. Valid values are 2 " + r"to n_samples - 1 \(inclusive\)" % len(np.unique(y)) + ) + with pytest.raises(ValueError, match=err_msg): + silhouette_score(X, y) + + +def test_non_encoded_labels(): + dataset = datasets.load_iris() + X = dataset.data + labels = dataset.target + assert silhouette_score(X, labels * 2 + 10) == silhouette_score(X, labels) + assert_array_equal( + silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels) + ) + + +def test_non_numpy_labels(): + dataset = datasets.load_iris() + X = dataset.data + y = dataset.target + assert silhouette_score(list(X), list(y)) == silhouette_score(X, y) + + +@pytest.mark.parametrize("dtype", (np.float32, np.float64)) +def test_silhouette_nonzero_diag(dtype): + # Make sure silhouette_samples requires diagonal to be zero. + # Non-regression test for #12178 + + # Construct a zero-diagonal matrix + dists = pairwise_distances( + np.array([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype).T + ) + labels = [0, 0, 0, 1, 1, 1] + + # small values on the diagonal are OK + dists[2][2] = np.finfo(dists.dtype).eps * 10 + silhouette_samples(dists, labels, metric="precomputed") + + # values bigger than eps * 100 are not + dists[2][2] = np.finfo(dists.dtype).eps * 1000 + with pytest.raises(ValueError, match="contains non-zero"): + silhouette_samples(dists, labels, metric="precomputed") + + +@pytest.mark.parametrize( + "sparse_container", + CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS, +) +def test_silhouette_samples_precomputed_sparse(sparse_container): + """Check that silhouette_samples works for sparse matrices correctly.""" + X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T + y = [0, 0, 0, 0, 1, 1, 1, 1] + pdist_dense = pairwise_distances(X) + pdist_sparse = sparse_container(pdist_dense) + assert issparse(pdist_sparse) + output_with_sparse_input = silhouette_samples(pdist_sparse, y, metric="precomputed") + output_with_dense_input = silhouette_samples(pdist_dense, y, metric="precomputed") + assert_allclose(output_with_sparse_input, output_with_dense_input) + + +@pytest.mark.parametrize( + "sparse_container", + CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS, +) +def test_silhouette_samples_euclidean_sparse(sparse_container): + """Check that silhouette_samples works for sparse matrices correctly.""" + X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T + y = [0, 0, 0, 0, 1, 1, 1, 1] + pdist_dense = pairwise_distances(X) + pdist_sparse = sparse_container(pdist_dense) + assert issparse(pdist_sparse) + output_with_sparse_input = silhouette_samples(pdist_sparse, y) + output_with_dense_input = silhouette_samples(pdist_dense, y) + assert_allclose(output_with_sparse_input, output_with_dense_input) + + +@pytest.mark.parametrize( + "sparse_container", CSC_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS +) +def test_silhouette_reduce(sparse_container): + """Check for non-CSR input to private method `_silhouette_reduce`.""" + X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T + pdist_dense = pairwise_distances(X) + pdist_sparse = sparse_container(pdist_dense) + y = [0, 0, 0, 0, 1, 1, 1, 1] + 
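+    # np.bincount over the integer labels yields the per-cluster sample counts
+    # that are passed below as the `label_freqs` argument.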
label_freqs = np.bincount(y) + with pytest.raises( + TypeError, + match="Expected CSR matrix. Please pass sparse matrix in CSR format.", + ): + _silhouette_reduce(pdist_sparse, start=0, labels=y, label_freqs=label_freqs) + + +def assert_raises_on_only_one_label(func): + """Assert message when there is only one label""" + rng = np.random.RandomState(seed=0) + with pytest.raises(ValueError, match="Number of labels is"): + func(rng.rand(10, 2), np.zeros(10)) + + +def assert_raises_on_all_points_same_cluster(func): + """Assert message when all point are in different clusters""" + rng = np.random.RandomState(seed=0) + with pytest.raises(ValueError, match="Number of labels is"): + func(rng.rand(10, 2), np.arange(10)) + + +def test_calinski_harabasz_score(): + assert_raises_on_only_one_label(calinski_harabasz_score) + + assert_raises_on_all_points_same_cluster(calinski_harabasz_score) + + # Assert the value is 1. when all samples are equals + assert 1.0 == calinski_harabasz_score(np.ones((10, 2)), [0] * 5 + [1] * 5) + + # Assert the value is 0. when all the mean cluster are equal + assert 0.0 == calinski_harabasz_score([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10) + + # General case (with non numpy arrays) + X = ( + [[0, 0], [1, 1]] * 5 + + [[3, 3], [4, 4]] * 5 + + [[0, 4], [1, 3]] * 5 + + [[3, 1], [4, 0]] * 5 + ) + labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10 + pytest.approx(calinski_harabasz_score(X, labels), 45 * (40 - 4) / (5 * (4 - 1))) + + +def test_davies_bouldin_score(): + assert_raises_on_only_one_label(davies_bouldin_score) + assert_raises_on_all_points_same_cluster(davies_bouldin_score) + + # Assert the value is 0. when all samples are equals + assert davies_bouldin_score(np.ones((10, 2)), [0] * 5 + [1] * 5) == pytest.approx( + 0.0 + ) + + # Assert the value is 0. when all the mean cluster are equal + assert davies_bouldin_score( + [[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10 + ) == pytest.approx(0.0) + + # General case (with non numpy arrays) + X = ( + [[0, 0], [1, 1]] * 5 + + [[3, 3], [4, 4]] * 5 + + [[0, 4], [1, 3]] * 5 + + [[3, 1], [4, 0]] * 5 + ) + labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10 + pytest.approx(davies_bouldin_score(X, labels), 2 * np.sqrt(0.5) / 3) + + # Ensure divide by zero warning is not raised in general case + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + davies_bouldin_score(X, labels) + + # General case - cluster have one sample + X = [[0, 0], [2, 2], [3, 3], [5, 5]] + labels = [0, 0, 1, 2] + pytest.approx(davies_bouldin_score(X, labels), (5.0 / 4) / 3) + + +def test_silhouette_score_integer_precomputed(): + """Check that silhouette_score works for precomputed metrics that are integers. + + Non-regression test for #22107. 
+ """ + result = silhouette_score( + [[0, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed" + ) + assert result == pytest.approx(1 / 6) + + # non-zero on diagonal for ints raises an error + with pytest.raises(ValueError, match="contains non-zero"): + silhouette_score( + [[1, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed" + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2730efe570a7b8573ed6ed67f13e44c29bfce7b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6b43e342038769f738da55c502888c1d81145a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_classification.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb5fc814f78bd0c893db8832033b1da38ea7f051 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_dist_metrics.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_dist_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d75a3a8aae3999e15938610a2c70f896c4b4d25 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_dist_metrics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7382673662f5aabcf91817a33fba515dfce85813 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise_distances_reduction.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise_distances_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95ebb8bf840b68b92f6e64140479c04b592df2b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_pairwise_distances_reduction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_ranking.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_ranking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..197d6c5eb46d64da025b7cad105b98ca437df5fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_ranking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_regression.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_regression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4625d586a1d85ef4d20641eb327f1c8437d85ed8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_regression.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_score_objects.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_score_objects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffc95d87979cecacb153f137a52ea88fdee977ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/__pycache__/test_score_objects.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_classification.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..abf1aae48759927e0e7fd8c0883bf936174afec1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_classification.py @@ -0,0 +1,2866 @@ +import re +import warnings +from functools import partial +from itertools import chain, permutations, product + +import numpy as np +import pytest +from scipy import linalg +from scipy.spatial.distance import hamming as sp_hamming +from scipy.stats import bernoulli + +from sklearn import datasets, svm +from sklearn.datasets import make_multilabel_classification +from sklearn.exceptions import UndefinedMetricWarning +from sklearn.metrics import ( + accuracy_score, + average_precision_score, + balanced_accuracy_score, + brier_score_loss, + class_likelihood_ratios, + classification_report, + cohen_kappa_score, + confusion_matrix, + f1_score, + fbeta_score, + hamming_loss, + hinge_loss, + jaccard_score, + log_loss, + make_scorer, + matthews_corrcoef, + multilabel_confusion_matrix, + precision_recall_fscore_support, + precision_score, + recall_score, + zero_one_loss, +) +from sklearn.metrics._classification import _check_targets +from sklearn.model_selection import cross_val_score +from sklearn.preprocessing import LabelBinarizer, label_binarize +from sklearn.tree import DecisionTreeClassifier +from sklearn.utils._mocking import MockDataFrame +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_no_warnings, + ignore_warnings, +) +from sklearn.utils.extmath import _nanaverage +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import check_random_state + +############################################################################### +# Utilities for testing + + +def make_prediction(dataset=None, binary=False): + """Make some classification predictions on a toy dataset using a SVC + + If binary is True restrict to a binary classification problem instead of a + multiclass classification problem + """ + + if dataset is None: + 
# import some data to play with + dataset = datasets.load_iris() + + X = dataset.data + y = dataset.target + + if binary: + # restrict to a binary classification task + X, y = X[y < 2], y[y < 2] + + n_samples, n_features = X.shape + p = np.arange(n_samples) + + rng = check_random_state(37) + rng.shuffle(p) + X, y = X[p], y[p] + half = int(n_samples / 2) + + # add noisy features to make the problem harder and avoid perfect results + rng = np.random.RandomState(0) + X = np.c_[X, rng.randn(n_samples, 200 * n_features)] + + # run classifier, get class probabilities and label predictions + clf = svm.SVC(kernel="linear", probability=True, random_state=0) + probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) + + if binary: + # only interested in probabilities of the positive case + # XXX: do we really want a special API for the binary case? + probas_pred = probas_pred[:, 1] + + y_pred = clf.predict(X[half:]) + y_true = y[half:] + return y_true, y_pred, probas_pred + + +############################################################################### +# Tests + + +def test_classification_report_dictionary_output(): + # Test performance report with dictionary output + iris = datasets.load_iris() + y_true, y_pred, _ = make_prediction(dataset=iris, binary=False) + + # print classification report with class names + expected_report = { + "setosa": { + "precision": 0.82608695652173914, + "recall": 0.79166666666666663, + "f1-score": 0.8085106382978724, + "support": 24, + }, + "versicolor": { + "precision": 0.33333333333333331, + "recall": 0.096774193548387094, + "f1-score": 0.15000000000000002, + "support": 31, + }, + "virginica": { + "precision": 0.41860465116279072, + "recall": 0.90000000000000002, + "f1-score": 0.57142857142857151, + "support": 20, + }, + "macro avg": { + "f1-score": 0.5099797365754813, + "precision": 0.5260083136726211, + "recall": 0.596146953405018, + "support": 75, + }, + "accuracy": 0.5333333333333333, + "weighted avg": { + "f1-score": 0.47310435663627154, + "precision": 0.5137535108414785, + "recall": 0.5333333333333333, + "support": 75, + }, + } + + report = classification_report( + y_true, + y_pred, + labels=np.arange(len(iris.target_names)), + target_names=iris.target_names, + output_dict=True, + ) + + # assert the 2 dicts are equal. + assert report.keys() == expected_report.keys() + for key in expected_report: + if key == "accuracy": + assert isinstance(report[key], float) + assert report[key] == expected_report[key] + else: + assert report[key].keys() == expected_report[key].keys() + for metric in expected_report[key]: + assert_almost_equal(expected_report[key][metric], report[key][metric]) + + assert isinstance(expected_report["setosa"]["precision"], float) + assert isinstance(expected_report["macro avg"]["precision"], float) + assert isinstance(expected_report["setosa"]["support"], int) + assert isinstance(expected_report["macro avg"]["support"], int) + + +def test_classification_report_output_dict_empty_input(): + report = classification_report(y_true=[], y_pred=[], output_dict=True) + expected_report = { + "accuracy": 0.0, + "macro avg": { + "f1-score": np.nan, + "precision": np.nan, + "recall": np.nan, + "support": 0, + }, + "weighted avg": { + "f1-score": np.nan, + "precision": np.nan, + "recall": np.nan, + "support": 0, + }, + } + assert isinstance(report, dict) + # assert the 2 dicts are equal. 
+ assert report.keys() == expected_report.keys() + for key in expected_report: + if key == "accuracy": + assert isinstance(report[key], float) + assert report[key] == expected_report[key] + else: + assert report[key].keys() == expected_report[key].keys() + for metric in expected_report[key]: + assert_almost_equal(expected_report[key][metric], report[key][metric]) + + +@pytest.mark.parametrize("zero_division", ["warn", 0, 1, np.nan]) +def test_classification_report_zero_division_warning(zero_division): + y_true, y_pred = ["a", "b", "c"], ["a", "b", "d"] + with warnings.catch_warnings(record=True) as record: + classification_report( + y_true, y_pred, zero_division=zero_division, output_dict=True + ) + if zero_division == "warn": + assert len(record) > 1 + for item in record: + msg = "Use `zero_division` parameter to control this behavior." + assert msg in str(item.message) + else: + assert not record + + +def test_multilabel_accuracy_score_subset_accuracy(): + # Dense label indicator matrix format + y1 = np.array([[0, 1, 1], [1, 0, 1]]) + y2 = np.array([[0, 0, 1], [1, 0, 1]]) + + assert accuracy_score(y1, y2) == 0.5 + assert accuracy_score(y1, y1) == 1 + assert accuracy_score(y2, y2) == 1 + assert accuracy_score(y2, np.logical_not(y2)) == 0 + assert accuracy_score(y1, np.logical_not(y1)) == 0 + assert accuracy_score(y1, np.zeros(y1.shape)) == 0 + assert accuracy_score(y2, np.zeros(y1.shape)) == 0 + + +def test_precision_recall_f1_score_binary(): + # Test Precision Recall and F1 Score for binary classification task + y_true, y_pred, _ = make_prediction(binary=True) + + # detailed measures for each class + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) + assert_array_almost_equal(p, [0.73, 0.85], 2) + assert_array_almost_equal(r, [0.88, 0.68], 2) + assert_array_almost_equal(f, [0.80, 0.76], 2) + assert_array_equal(s, [25, 25]) + + # individual scoring function that can be used for grid search: in the + # binary class case the score is the value of the measure for the positive + # class (e.g. label == 1). This is deprecated for average != 'binary'. 
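+    # The loop below also cross-checks fbeta_score against the closed form
+    #     F_beta = (1 + beta**2) * precision * recall / (beta**2 * precision + recall)
+    # evaluated with beta = 2.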
+ for kwargs, my_assert in [ + ({}, assert_no_warnings), + ({"average": "binary"}, assert_no_warnings), + ]: + ps = my_assert(precision_score, y_true, y_pred, **kwargs) + assert_array_almost_equal(ps, 0.85, 2) + + rs = my_assert(recall_score, y_true, y_pred, **kwargs) + assert_array_almost_equal(rs, 0.68, 2) + + fs = my_assert(f1_score, y_true, y_pred, **kwargs) + assert_array_almost_equal(fs, 0.76, 2) + + assert_almost_equal( + my_assert(fbeta_score, y_true, y_pred, beta=2, **kwargs), + (1 + 2**2) * ps * rs / (2**2 * ps + rs), + 2, + ) + + +@ignore_warnings +def test_precision_recall_f_binary_single_class(): + # Test precision, recall and F-scores behave with a single positive or + # negative class + # Such a case may occur with non-stratified cross-validation + assert 1.0 == precision_score([1, 1], [1, 1]) + assert 1.0 == recall_score([1, 1], [1, 1]) + assert 1.0 == f1_score([1, 1], [1, 1]) + assert 1.0 == fbeta_score([1, 1], [1, 1], beta=0) + + assert 0.0 == precision_score([-1, -1], [-1, -1]) + assert 0.0 == recall_score([-1, -1], [-1, -1]) + assert 0.0 == f1_score([-1, -1], [-1, -1]) + assert 0.0 == fbeta_score([-1, -1], [-1, -1], beta=float("inf")) + assert fbeta_score([-1, -1], [-1, -1], beta=float("inf")) == pytest.approx( + fbeta_score([-1, -1], [-1, -1], beta=1e5) + ) + + +@ignore_warnings +def test_precision_recall_f_extra_labels(): + # Test handling of explicit additional (not in input) labels to PRF + y_true = [1, 3, 3, 2] + y_pred = [1, 1, 3, 2] + y_true_bin = label_binarize(y_true, classes=np.arange(5)) + y_pred_bin = label_binarize(y_pred, classes=np.arange(5)) + data = [(y_true, y_pred), (y_true_bin, y_pred_bin)] + + for i, (y_true, y_pred) in enumerate(data): + # No average: zeros in array + actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average=None) + assert_array_almost_equal([0.0, 1.0, 1.0, 0.5, 0.0], actual) + + # Macro average is changed + actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average="macro") + assert_array_almost_equal(np.mean([0.0, 1.0, 1.0, 0.5, 0.0]), actual) + + # No effect otherwise + for average in ["micro", "weighted", "samples"]: + if average == "samples" and i == 0: + continue + assert_almost_equal( + recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average=average), + recall_score(y_true, y_pred, labels=None, average=average), + ) + + # Error when introducing invalid label in multilabel case + # (although it would only affect performance if average='macro'/None) + for average in [None, "macro", "micro", "samples"]: + with pytest.raises(ValueError): + recall_score(y_true_bin, y_pred_bin, labels=np.arange(6), average=average) + with pytest.raises(ValueError): + recall_score( + y_true_bin, y_pred_bin, labels=np.arange(-1, 4), average=average + ) + + # tests non-regression on issue #10307 + y_true = np.array([[0, 1, 1], [1, 0, 0]]) + y_pred = np.array([[1, 1, 1], [1, 0, 1]]) + p, r, f, _ = precision_recall_fscore_support( + y_true, y_pred, average="samples", labels=[0, 1] + ) + assert_almost_equal(np.array([p, r, f]), np.array([3 / 4, 1, 5 / 6])) + + +@ignore_warnings +def test_precision_recall_f_ignored_labels(): + # Test a subset of labels may be requested for PRF + y_true = [1, 1, 2, 3] + y_pred = [1, 3, 3, 3] + y_true_bin = label_binarize(y_true, classes=np.arange(5)) + y_pred_bin = label_binarize(y_pred, classes=np.arange(5)) + data = [(y_true, y_pred), (y_true_bin, y_pred_bin)] + + for i, (y_true, y_pred) in enumerate(data): + recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3]) + recall_all = 
partial(recall_score, y_true, y_pred, labels=None) + + assert_array_almost_equal([0.5, 1.0], recall_13(average=None)) + assert_almost_equal((0.5 + 1.0) / 2, recall_13(average="macro")) + assert_almost_equal((0.5 * 2 + 1.0 * 1) / 3, recall_13(average="weighted")) + assert_almost_equal(2.0 / 3, recall_13(average="micro")) + + # ensure the above were meaningful tests: + for average in ["macro", "weighted", "micro"]: + assert recall_13(average=average) != recall_all(average=average) + + +def test_average_precision_score_non_binary_class(): + """Test multiclass-multiouptut for `average_precision_score`.""" + y_true = np.array( + [ + [2, 2, 1], + [1, 2, 0], + [0, 1, 2], + [1, 2, 1], + [2, 0, 1], + [1, 2, 1], + ] + ) + y_score = np.array( + [ + [0.7, 0.2, 0.1], + [0.4, 0.3, 0.3], + [0.1, 0.8, 0.1], + [0.2, 0.3, 0.5], + [0.4, 0.4, 0.2], + [0.1, 0.2, 0.7], + ] + ) + err_msg = "multiclass-multioutput format is not supported" + with pytest.raises(ValueError, match=err_msg): + average_precision_score(y_true, y_score, pos_label=2) + + +@pytest.mark.parametrize( + "y_true, y_score", + [ + ( + [0, 0, 1, 2], + np.array( + [ + [0.7, 0.2, 0.1], + [0.4, 0.3, 0.3], + [0.1, 0.8, 0.1], + [0.2, 0.3, 0.5], + ] + ), + ), + ( + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1], + [0, 0.1, 0.1, 0.4, 0.5, 0.6, 0.6, 0.9, 0.9, 1, 1], + ), + ], +) +def test_average_precision_score_duplicate_values(y_true, y_score): + """ + Duplicate values with precision-recall require a different + processing than when computing the AUC of a ROC, because the + precision-recall curve is a decreasing curve + The following situation corresponds to a perfect + test statistic, the average_precision_score should be 1. + """ + assert average_precision_score(y_true, y_score) == 1 + + +@pytest.mark.parametrize( + "y_true, y_score", + [ + ( + [2, 2, 1, 1, 0], + np.array( + [ + [0.2, 0.3, 0.5], + [0.2, 0.3, 0.5], + [0.4, 0.5, 0.3], + [0.4, 0.5, 0.3], + [0.8, 0.5, 0.3], + ] + ), + ), + ( + [0, 1, 1], + [0.5, 0.5, 0.6], + ), + ], +) +def test_average_precision_score_tied_values(y_true, y_score): + # Here if we go from left to right in y_true, the 0 values are + # separated from the 1 values, so it appears that we've + # correctly sorted our classifications. But in fact the first two + # values have the same score (0.5) and so the first two values + # could be swapped around, creating an imperfect sorting. This + # imperfection should come through in the end score, making it less + # than one. + assert average_precision_score(y_true, y_score) != 1.0 + + +def test_precision_recall_f_unused_pos_label(): + # Check warning that pos_label unused when set to non-default value + # but average != 'binary'; even if data is binary. + + msg = ( + r"Note that pos_label \(set to 2\) is " + r"ignored when average != 'binary' \(got 'macro'\). You " + r"may use labels=\[pos_label\] to specify a single " + "positive class." 
+ ) + with pytest.warns(UserWarning, match=msg): + precision_recall_fscore_support( + [1, 2, 1], [1, 2, 2], pos_label=2, average="macro" + ) + + +def test_confusion_matrix_binary(): + # Test confusion matrix - binary classification case + y_true, y_pred, _ = make_prediction(binary=True) + + def test(y_true, y_pred): + cm = confusion_matrix(y_true, y_pred) + assert_array_equal(cm, [[22, 3], [8, 17]]) + + tp, fp, fn, tn = cm.flatten() + num = tp * tn - fp * fn + den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) + + true_mcc = 0 if den == 0 else num / den + mcc = matthews_corrcoef(y_true, y_pred) + assert_array_almost_equal(mcc, true_mcc, decimal=2) + assert_array_almost_equal(mcc, 0.57, decimal=2) + + test(y_true, y_pred) + test([str(y) for y in y_true], [str(y) for y in y_pred]) + + +def test_multilabel_confusion_matrix_binary(): + # Test multilabel confusion matrix - binary classification case + y_true, y_pred, _ = make_prediction(binary=True) + + def test(y_true, y_pred): + cm = multilabel_confusion_matrix(y_true, y_pred) + assert_array_equal(cm, [[[17, 8], [3, 22]], [[22, 3], [8, 17]]]) + + test(y_true, y_pred) + test([str(y) for y in y_true], [str(y) for y in y_pred]) + + +def test_multilabel_confusion_matrix_multiclass(): + # Test multilabel confusion matrix - multi-class case + y_true, y_pred, _ = make_prediction(binary=False) + + def test(y_true, y_pred, string_type=False): + # compute confusion matrix with default labels introspection + cm = multilabel_confusion_matrix(y_true, y_pred) + assert_array_equal( + cm, [[[47, 4], [5, 19]], [[38, 6], [28, 3]], [[30, 25], [2, 18]]] + ) + + # compute confusion matrix with explicit label ordering + labels = ["0", "2", "1"] if string_type else [0, 2, 1] + cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels) + assert_array_equal( + cm, [[[47, 4], [5, 19]], [[30, 25], [2, 18]], [[38, 6], [28, 3]]] + ) + + # compute confusion matrix with super set of present labels + labels = ["0", "2", "1", "3"] if string_type else [0, 2, 1, 3] + cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels) + assert_array_equal( + cm, + [ + [[47, 4], [5, 19]], + [[30, 25], [2, 18]], + [[38, 6], [28, 3]], + [[75, 0], [0, 0]], + ], + ) + + test(y_true, y_pred) + test([str(y) for y in y_true], [str(y) for y in y_pred], string_type=True) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_multilabel_confusion_matrix_multilabel(csc_container, csr_container): + # Test multilabel confusion matrix - multilabel-indicator case + + y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]]) + y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]]) + y_true_csr = csr_container(y_true) + y_pred_csr = csr_container(y_pred) + y_true_csc = csc_container(y_true) + y_pred_csc = csc_container(y_pred) + + # cross test different types + sample_weight = np.array([2, 1, 3]) + real_cm = [[[1, 0], [1, 1]], [[1, 0], [1, 1]], [[0, 2], [1, 0]]] + trues = [y_true, y_true_csr, y_true_csc] + preds = [y_pred, y_pred_csr, y_pred_csc] + + for y_true_tmp in trues: + for y_pred_tmp in preds: + cm = multilabel_confusion_matrix(y_true_tmp, y_pred_tmp) + assert_array_equal(cm, real_cm) + + # test support for samplewise + cm = multilabel_confusion_matrix(y_true, y_pred, samplewise=True) + assert_array_equal(cm, [[[1, 0], [1, 1]], [[1, 1], [0, 1]], [[0, 1], [2, 0]]]) + + # test support for labels + cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0]) + assert_array_equal(cm, [[[0, 2], [1, 0]], [[1, 
0], [1, 1]]]) + + # test support for labels with samplewise + cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0], samplewise=True) + assert_array_equal(cm, [[[0, 0], [1, 1]], [[1, 1], [0, 0]], [[0, 1], [1, 0]]]) + + # test support for sample_weight with sample_wise + cm = multilabel_confusion_matrix( + y_true, y_pred, sample_weight=sample_weight, samplewise=True + ) + assert_array_equal(cm, [[[2, 0], [2, 2]], [[1, 1], [0, 1]], [[0, 3], [6, 0]]]) + + +def test_multilabel_confusion_matrix_errors(): + y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]]) + y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]]) + + # Bad sample_weight + with pytest.raises(ValueError, match="inconsistent numbers of samples"): + multilabel_confusion_matrix(y_true, y_pred, sample_weight=[1, 2]) + with pytest.raises(ValueError, match="should be a 1d array"): + multilabel_confusion_matrix( + y_true, y_pred, sample_weight=[[1, 2, 3], [2, 3, 4], [3, 4, 5]] + ) + + # Bad labels + err_msg = r"All labels must be in \[0, n labels\)" + with pytest.raises(ValueError, match=err_msg): + multilabel_confusion_matrix(y_true, y_pred, labels=[-1]) + err_msg = r"All labels must be in \[0, n labels\)" + with pytest.raises(ValueError, match=err_msg): + multilabel_confusion_matrix(y_true, y_pred, labels=[3]) + + # Using samplewise outside multilabel + with pytest.raises(ValueError, match="Samplewise metrics"): + multilabel_confusion_matrix([0, 1, 2], [1, 2, 0], samplewise=True) + + # Bad y_type + err_msg = "multiclass-multioutput is not supported" + with pytest.raises(ValueError, match=err_msg): + multilabel_confusion_matrix([[0, 1, 2], [2, 1, 0]], [[1, 2, 0], [1, 0, 2]]) + + +@pytest.mark.parametrize( + "normalize, cm_dtype, expected_results", + [ + ("true", "f", 0.333333333), + ("pred", "f", 0.333333333), + ("all", "f", 0.1111111111), + (None, "i", 2), + ], +) +def test_confusion_matrix_normalize(normalize, cm_dtype, expected_results): + y_test = [0, 1, 2] * 6 + y_pred = list(chain(*permutations([0, 1, 2]))) + cm = confusion_matrix(y_test, y_pred, normalize=normalize) + assert_allclose(cm, expected_results) + assert cm.dtype.kind == cm_dtype + + +def test_confusion_matrix_normalize_single_class(): + y_test = [0, 0, 0, 0, 1, 1, 1, 1] + y_pred = [0, 0, 0, 0, 0, 0, 0, 0] + + cm_true = confusion_matrix(y_test, y_pred, normalize="true") + assert cm_true.sum() == pytest.approx(2.0) + + # additionally check that no warnings are raised due to a division by zero + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + cm_pred = confusion_matrix(y_test, y_pred, normalize="pred") + + assert cm_pred.sum() == pytest.approx(1.0) + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + confusion_matrix(y_pred, y_test, normalize="true") + + +def test_confusion_matrix_single_label(): + """Test `confusion_matrix` warns when only one label found.""" + y_test = [0, 0, 0, 0] + y_pred = [0, 0, 0, 0] + + with pytest.warns(UserWarning, match="A single label was found in"): + confusion_matrix(y_pred, y_test) + + +@pytest.mark.parametrize( + "params, warn_msg", + [ + # When y_test contains one class only and y_test==y_pred, LR+ is undefined + ( + { + "y_true": np.array([0, 0, 0, 0, 0, 0]), + "y_pred": np.array([0, 0, 0, 0, 0, 0]), + }, + "samples of only one class were seen during testing", + ), + # When `fp == 0` and `tp != 0`, LR+ is undefined + ( + { + "y_true": np.array([1, 1, 1, 0, 0, 0]), + "y_pred": np.array([1, 1, 1, 0, 0, 0]), + }, + "positive_likelihood_ratio ill-defined and being 
set to nan", + ), + # When `fp == 0` and `tp == 0`, LR+ is undefined + ( + { + "y_true": np.array([1, 1, 1, 0, 0, 0]), + "y_pred": np.array([0, 0, 0, 0, 0, 0]), + }, + "no samples predicted for the positive class", + ), + # When `tn == 0`, LR- is undefined + ( + { + "y_true": np.array([1, 1, 1, 0, 0, 0]), + "y_pred": np.array([0, 0, 0, 1, 1, 1]), + }, + "negative_likelihood_ratio ill-defined and being set to nan", + ), + # When `tp + fn == 0` both ratios are undefined + ( + { + "y_true": np.array([0, 0, 0, 0, 0, 0]), + "y_pred": np.array([1, 1, 1, 0, 0, 0]), + }, + "no samples of the positive class were present in the testing set", + ), + ], +) +def test_likelihood_ratios_warnings(params, warn_msg): + # likelihood_ratios must raise warnings when at + # least one of the ratios is ill-defined. + + with pytest.warns(UserWarning, match=warn_msg): + class_likelihood_ratios(**params) + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + { + "y_true": np.array([0, 1, 0, 1, 0]), + "y_pred": np.array([1, 1, 0, 0, 2]), + }, + ( + "class_likelihood_ratios only supports binary classification " + "problems, got targets of type: multiclass" + ), + ), + ], +) +def test_likelihood_ratios_errors(params, err_msg): + # likelihood_ratios must raise error when attempting + # non-binary classes to avoid Simpson's paradox + with pytest.raises(ValueError, match=err_msg): + class_likelihood_ratios(**params) + + +def test_likelihood_ratios(): + # Build confusion matrix with tn=9, fp=8, fn=1, tp=2, + # sensitivity=2/3, specificity=9/17, prevalence=3/20, + # LR+=34/24, LR-=17/27 + y_true = np.array([1] * 3 + [0] * 17) + y_pred = np.array([1] * 2 + [0] * 10 + [1] * 8) + + pos, neg = class_likelihood_ratios(y_true, y_pred) + assert_allclose(pos, 34 / 24) + assert_allclose(neg, 17 / 27) + + # Build limit case with y_pred = y_true + pos, neg = class_likelihood_ratios(y_true, y_true) + assert_array_equal(pos, np.nan * 2) + assert_allclose(neg, np.zeros(2), rtol=1e-12) + + # Ignore last 5 samples to get tn=9, fp=3, fn=1, tp=2, + # sensitivity=2/3, specificity=9/12, prevalence=3/20, + # LR+=24/9, LR-=12/27 + sample_weight = np.array([1.0] * 15 + [0.0] * 5) + pos, neg = class_likelihood_ratios(y_true, y_pred, sample_weight=sample_weight) + assert_allclose(pos, 24 / 9) + assert_allclose(neg, 12 / 27) + + +def test_cohen_kappa(): + # These label vectors reproduce the contingency matrix from Artstein and + # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]). + y1 = np.array([0] * 40 + [1] * 60) + y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50) + kappa = cohen_kappa_score(y1, y2) + assert_almost_equal(kappa, 0.348, decimal=3) + assert kappa == cohen_kappa_score(y2, y1) + + # Add spurious labels and ignore them. + y1 = np.append(y1, [2] * 4) + y2 = np.append(y2, [2] * 4) + assert cohen_kappa_score(y1, y2, labels=[0, 1]) == kappa + + assert_almost_equal(cohen_kappa_score(y1, y1), 1.0) + + # Multiclass example: Artstein and Poesio, Table 4. + y1 = np.array([0] * 46 + [1] * 44 + [2] * 10) + y2 = np.array([0] * 52 + [1] * 32 + [2] * 16) + assert_almost_equal(cohen_kappa_score(y1, y2), 0.8013, decimal=4) + + # Weighting example: none, linear, quadratic. 
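+    # Editor's note (illustrative sketch, not part of the upstream test):
+    # before the weighted examples below, cross-check the unweighted
+    # definition on the Table 4 example above. Cohen's kappa is
+    #     kappa = (p_o - p_e) / (1 - p_e),
+    # where p_o is the observed agreement and p_e the agreement expected by
+    # chance from the marginal label frequencies; the "linear" and "quadratic"
+    # weights then replace exact agreement with a distance-based penalty
+    # between categories.
+    C = confusion_matrix(y1, y2)
+    p_o = np.trace(C) / C.sum()
+    p_e = np.dot(C.sum(axis=1), C.sum(axis=0)) / C.sum() ** 2
+    assert_almost_equal((p_o - p_e) / (1 - p_e), cohen_kappa_score(y1, y2))
+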
+ y1 = np.array([0] * 46 + [1] * 44 + [2] * 10) + y2 = np.array([0] * 50 + [1] * 40 + [2] * 10) + assert_almost_equal(cohen_kappa_score(y1, y2), 0.9315, decimal=4) + assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), 0.9412, decimal=4) + assert_almost_equal( + cohen_kappa_score(y1, y2, weights="quadratic"), 0.9541, decimal=4 + ) + + +def test_matthews_corrcoef_nan(): + assert matthews_corrcoef([0], [1]) == 0.0 + assert matthews_corrcoef([0, 0], [0, 1]) == 0.0 + + +@pytest.mark.parametrize("zero_division", [0, 1, np.nan]) +@pytest.mark.parametrize("y_true, y_pred", [([0], [0]), ([], [])]) +@pytest.mark.parametrize( + "metric", + [ + f1_score, + partial(fbeta_score, beta=1), + precision_score, + recall_score, + ], +) +def test_zero_division_nan_no_warning(metric, y_true, y_pred, zero_division): + """Check the behaviour of `zero_division` when setting to 0, 1 or np.nan. + No warnings should be raised. + """ + with warnings.catch_warnings(): + warnings.simplefilter("error") + result = metric(y_true, y_pred, zero_division=zero_division) + + if np.isnan(zero_division): + assert np.isnan(result) + else: + assert result == zero_division + + +@pytest.mark.parametrize("y_true, y_pred", [([0], [0]), ([], [])]) +@pytest.mark.parametrize( + "metric", + [ + f1_score, + partial(fbeta_score, beta=1), + precision_score, + recall_score, + ], +) +def test_zero_division_nan_warning(metric, y_true, y_pred): + """Check the behaviour of `zero_division` when setting to "warn". + A `UndefinedMetricWarning` should be raised. + """ + with pytest.warns(UndefinedMetricWarning): + result = metric(y_true, y_pred, zero_division="warn") + assert result == 0.0 + + +def test_matthews_corrcoef_against_numpy_corrcoef(): + rng = np.random.RandomState(0) + y_true = rng.randint(0, 2, size=20) + y_pred = rng.randint(0, 2, size=20) + + assert_almost_equal( + matthews_corrcoef(y_true, y_pred), np.corrcoef(y_true, y_pred)[0, 1], 10 + ) + + +def test_matthews_corrcoef_against_jurman(): + # Check that the multiclass matthews_corrcoef agrees with the definition + # presented in Jurman, Riccadonna, Furlanello, (2012). 
A Comparison of MCC + # and CEN Error Measures in MultiClass Prediction + rng = np.random.RandomState(0) + y_true = rng.randint(0, 2, size=20) + y_pred = rng.randint(0, 2, size=20) + sample_weight = rng.rand(20) + + C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight) + N = len(C) + cov_ytyp = sum( + [ + C[k, k] * C[m, l] - C[l, k] * C[k, m] + for k in range(N) + for m in range(N) + for l in range(N) + ] + ) + cov_ytyt = sum( + [ + C[:, k].sum() + * np.sum([C[g, f] for f in range(N) for g in range(N) if f != k]) + for k in range(N) + ] + ) + cov_ypyp = np.sum( + [ + C[k, :].sum() + * np.sum([C[f, g] for f in range(N) for g in range(N) if f != k]) + for k in range(N) + ] + ) + mcc_jurman = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp) + mcc_ours = matthews_corrcoef(y_true, y_pred, sample_weight=sample_weight) + + assert_almost_equal(mcc_ours, mcc_jurman, 10) + + +def test_matthews_corrcoef(): + rng = np.random.RandomState(0) + y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)] + + # corrcoef of same vectors must be 1 + assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0) + + # corrcoef, when the two vectors are opposites of each other, should be -1 + y_true_inv = ["b" if i == "a" else "a" for i in y_true] + assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1) + + y_true_inv2 = label_binarize(y_true, classes=["a", "b"]) + y_true_inv2 = np.where(y_true_inv2, "a", "b") + assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1) + + # For the zero vector case, the corrcoef cannot be calculated and should + # output 0 + assert_almost_equal(matthews_corrcoef([0, 0, 0, 0], [0, 0, 0, 0]), 0.0) + + # And also for any other vector with 0 variance + assert_almost_equal(matthews_corrcoef(y_true, ["a"] * len(y_true)), 0.0) + + # These two vectors have 0 correlation and hence mcc should be 0 + y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1] + y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1] + assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.0) + + # Check that sample weight is able to selectively exclude + mask = [1] * 10 + [0] * 10 + # Now the first half of the vector elements are alone given a weight of 1 + # and hence the mcc will not be a perfect 0 as in the previous case + with pytest.raises(AssertionError): + assert_almost_equal(matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.0) + + +def test_matthews_corrcoef_multiclass(): + rng = np.random.RandomState(0) + ord_a = ord("a") + n_classes = 4 + y_true = [chr(ord_a + i) for i in rng.randint(0, n_classes, size=20)] + + # corrcoef of same vectors must be 1 + assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0) + + # with multiclass > 2 it is not possible to achieve -1 + y_true = [0, 0, 1, 1, 2, 2] + y_pred_bad = [2, 2, 0, 0, 1, 1] + assert_almost_equal(matthews_corrcoef(y_true, y_pred_bad), -0.5) + + # Maximizing false positives and negatives minimizes the MCC + # The minimum will be different for depending on the input + y_true = [0, 0, 1, 1, 2, 2] + y_pred_min = [1, 1, 0, 0, 0, 0] + assert_almost_equal(matthews_corrcoef(y_true, y_pred_min), -12 / np.sqrt(24 * 16)) + + # Zero variance will result in an mcc of zero + y_true = [0, 1, 2] + y_pred = [3, 3, 3] + assert_almost_equal(matthews_corrcoef(y_true, y_pred), 0.0) + + # Also for ground truth with zero variance + y_true = [3, 3, 3] + y_pred = [0, 1, 2] + assert_almost_equal(matthews_corrcoef(y_true, y_pred), 0.0) + + # These two vectors have 0 correlation and hence mcc should be 0 + y_1 = [0, 1, 2, 0, 1, 
2, 0, 1, 2] + y_2 = [1, 1, 1, 2, 2, 2, 0, 0, 0] + assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.0) + + # We can test that binary assumptions hold using the multiclass computation + # by masking the weight of samples not in the first two classes + + # Masking the last label should let us get an MCC of -1 + y_true = [0, 0, 1, 1, 2] + y_pred = [1, 1, 0, 0, 2] + sample_weight = [1, 1, 1, 1, 0] + assert_almost_equal( + matthews_corrcoef(y_true, y_pred, sample_weight=sample_weight), -1 + ) + + # For the zero vector case, the corrcoef cannot be calculated and should + # output 0 + y_true = [0, 0, 1, 2] + y_pred = [0, 0, 1, 2] + sample_weight = [1, 1, 0, 0] + assert_almost_equal( + matthews_corrcoef(y_true, y_pred, sample_weight=sample_weight), 0.0 + ) + + +@pytest.mark.parametrize("n_points", [100, 10000]) +def test_matthews_corrcoef_overflow(n_points): + # https://github.com/scikit-learn/scikit-learn/issues/9622 + rng = np.random.RandomState(20170906) + + def mcc_safe(y_true, y_pred): + conf_matrix = confusion_matrix(y_true, y_pred) + true_pos = conf_matrix[1, 1] + false_pos = conf_matrix[1, 0] + false_neg = conf_matrix[0, 1] + n_points = len(y_true) + pos_rate = (true_pos + false_neg) / n_points + activity = (true_pos + false_pos) / n_points + mcc_numerator = true_pos / n_points - pos_rate * activity + mcc_denominator = activity * pos_rate * (1 - activity) * (1 - pos_rate) + return mcc_numerator / np.sqrt(mcc_denominator) + + def random_ys(n_points): # binary + x_true = rng.random_sample(n_points) + x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5) + y_true = x_true > 0.5 + y_pred = x_pred > 0.5 + return y_true, y_pred + + arr = np.repeat([0.0, 1.0], n_points) # binary + assert_almost_equal(matthews_corrcoef(arr, arr), 1.0) + arr = np.repeat([0.0, 1.0, 2.0], n_points) # multiclass + assert_almost_equal(matthews_corrcoef(arr, arr), 1.0) + + y_true, y_pred = random_ys(n_points) + assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0) + assert_almost_equal(matthews_corrcoef(y_true, y_pred), mcc_safe(y_true, y_pred)) + + +def test_precision_recall_f1_score_multiclass(): + # Test Precision Recall and F1 Score for multiclass classification task + y_true, y_pred, _ = make_prediction(binary=False) + + # compute scores with default labels introspection + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) + assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2) + assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2) + assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2) + assert_array_equal(s, [24, 31, 20]) + + # averaging tests + ps = precision_score(y_true, y_pred, pos_label=1, average="micro") + assert_array_almost_equal(ps, 0.53, 2) + + rs = recall_score(y_true, y_pred, average="micro") + assert_array_almost_equal(rs, 0.53, 2) + + fs = f1_score(y_true, y_pred, average="micro") + assert_array_almost_equal(fs, 0.53, 2) + + ps = precision_score(y_true, y_pred, average="macro") + assert_array_almost_equal(ps, 0.53, 2) + + rs = recall_score(y_true, y_pred, average="macro") + assert_array_almost_equal(rs, 0.60, 2) + + fs = f1_score(y_true, y_pred, average="macro") + assert_array_almost_equal(fs, 0.51, 2) + + ps = precision_score(y_true, y_pred, average="weighted") + assert_array_almost_equal(ps, 0.51, 2) + + rs = recall_score(y_true, y_pred, average="weighted") + assert_array_almost_equal(rs, 0.53, 2) + + fs = f1_score(y_true, y_pred, average="weighted") + assert_array_almost_equal(fs, 0.47, 2) + + with pytest.raises(ValueError): + precision_score(y_true, y_pred, 
average="samples") + with pytest.raises(ValueError): + recall_score(y_true, y_pred, average="samples") + with pytest.raises(ValueError): + f1_score(y_true, y_pred, average="samples") + with pytest.raises(ValueError): + fbeta_score(y_true, y_pred, average="samples", beta=0.5) + + # same prediction but with and explicit label ordering + p, r, f, s = precision_recall_fscore_support( + y_true, y_pred, labels=[0, 2, 1], average=None + ) + assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2) + assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2) + assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2) + assert_array_equal(s, [24, 20, 31]) + + +@pytest.mark.parametrize("average", ["samples", "micro", "macro", "weighted", None]) +def test_precision_refcall_f1_score_multilabel_unordered_labels(average): + # test that labels need not be sorted in the multilabel case + y_true = np.array([[1, 1, 0, 0]]) + y_pred = np.array([[0, 0, 1, 1]]) + p, r, f, s = precision_recall_fscore_support( + y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average + ) + assert_array_equal(p, 0) + assert_array_equal(r, 0) + assert_array_equal(f, 0) + if average is None: + assert_array_equal(s, [0, 1, 1, 0]) + + +def test_precision_recall_f1_score_binary_averaged(): + y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1]) + y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1]) + + # compute scores with default labels introspection + ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred, average=None) + p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, average="macro") + assert p == np.mean(ps) + assert r == np.mean(rs) + assert f == np.mean(fs) + p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, average="weighted") + support = np.bincount(y_true) + assert p == np.average(ps, weights=support) + assert r == np.average(rs, weights=support) + assert f == np.average(fs, weights=support) + + +def test_zero_precision_recall(): + # Check that pathological cases do not bring NaNs + + old_error_settings = np.seterr(all="raise") + + try: + y_true = np.array([0, 1, 2, 0, 1, 2]) + y_pred = np.array([2, 0, 1, 1, 2, 0]) + + assert_almost_equal(precision_score(y_true, y_pred, average="macro"), 0.0, 2) + assert_almost_equal(recall_score(y_true, y_pred, average="macro"), 0.0, 2) + assert_almost_equal(f1_score(y_true, y_pred, average="macro"), 0.0, 2) + + finally: + np.seterr(**old_error_settings) + + +def test_confusion_matrix_multiclass_subset_labels(): + # Test confusion matrix - multi-class case with subset of labels + y_true, y_pred, _ = make_prediction(binary=False) + + # compute confusion matrix with only first two labels considered + cm = confusion_matrix(y_true, y_pred, labels=[0, 1]) + assert_array_equal(cm, [[19, 4], [4, 3]]) + + # compute confusion matrix with explicit label ordering for only subset + # of labels + cm = confusion_matrix(y_true, y_pred, labels=[2, 1]) + assert_array_equal(cm, [[18, 2], [24, 3]]) + + # a label not in y_true should result in zeros for that row/column + extra_label = np.max(y_true) + 1 + cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label]) + assert_array_equal(cm, [[18, 0], [0, 0]]) + + +@pytest.mark.parametrize( + "labels, err_msg", + [ + ([], "'labels' should contains at least one label."), + ([3, 4], "At least one label specified must be in y_true"), + ], + ids=["empty list", "unknown labels"], +) +def test_confusion_matrix_error(labels, err_msg): + y_true, y_pred, _ = make_prediction(binary=False) + with pytest.raises(ValueError, 
match=err_msg): + confusion_matrix(y_true, y_pred, labels=labels) + + +@pytest.mark.parametrize( + "labels", (None, [0, 1], [0, 1, 2]), ids=["None", "binary", "multiclass"] +) +def test_confusion_matrix_on_zero_length_input(labels): + expected_n_classes = len(labels) if labels else 0 + expected = np.zeros((expected_n_classes, expected_n_classes), dtype=int) + cm = confusion_matrix([], [], labels=labels) + assert_array_equal(cm, expected) + + +def test_confusion_matrix_dtype(): + y = [0, 1, 1] + weight = np.ones(len(y)) + # confusion_matrix returns int64 by default + cm = confusion_matrix(y, y) + assert cm.dtype == np.int64 + # The dtype of confusion_matrix is always 64 bit + for dtype in [np.bool_, np.int32, np.uint64]: + cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype, copy=False)) + assert cm.dtype == np.int64 + for dtype in [np.float32, np.float64, None, object]: + cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype, copy=False)) + assert cm.dtype == np.float64 + + # np.iinfo(np.uint32).max should be accumulated correctly + weight = np.full(len(y), 4294967295, dtype=np.uint32) + cm = confusion_matrix(y, y, sample_weight=weight) + assert cm[0, 0] == 4294967295 + assert cm[1, 1] == 8589934590 + + # np.iinfo(np.int64).max should cause an overflow + weight = np.full(len(y), 9223372036854775807, dtype=np.int64) + cm = confusion_matrix(y, y, sample_weight=weight) + assert cm[0, 0] == 9223372036854775807 + assert cm[1, 1] == -2 + + +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +def test_confusion_matrix_pandas_nullable(dtype): + """Checks that confusion_matrix works with pandas nullable dtypes. + + Non-regression test for gh-25635. + """ + pd = pytest.importorskip("pandas") + + y_ndarray = np.array([1, 0, 0, 1, 0, 1, 1, 0, 1]) + y_true = pd.Series(y_ndarray, dtype=dtype) + y_predicted = pd.Series([0, 0, 1, 1, 0, 1, 1, 1, 1], dtype="int64") + + output = confusion_matrix(y_true, y_predicted) + expected_output = confusion_matrix(y_ndarray, y_predicted) + + assert_array_equal(output, expected_output) + + +def test_classification_report_multiclass(): + # Test performance report + iris = datasets.load_iris() + y_true, y_pred, _ = make_prediction(dataset=iris, binary=False) + + # print classification report with class names + expected_report = """\ + precision recall f1-score support + + setosa 0.83 0.79 0.81 24 + versicolor 0.33 0.10 0.15 31 + virginica 0.42 0.90 0.57 20 + + accuracy 0.53 75 + macro avg 0.53 0.60 0.51 75 +weighted avg 0.51 0.53 0.47 75 +""" + report = classification_report( + y_true, + y_pred, + labels=np.arange(len(iris.target_names)), + target_names=iris.target_names, + ) + assert report == expected_report + + +def test_classification_report_multiclass_balanced(): + y_true, y_pred = [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2] + + expected_report = """\ + precision recall f1-score support + + 0 0.33 0.33 0.33 3 + 1 0.33 0.33 0.33 3 + 2 0.33 0.33 0.33 3 + + accuracy 0.33 9 + macro avg 0.33 0.33 0.33 9 +weighted avg 0.33 0.33 0.33 9 +""" + report = classification_report(y_true, y_pred) + assert report == expected_report + + +def test_classification_report_multiclass_with_label_detection(): + iris = datasets.load_iris() + y_true, y_pred, _ = make_prediction(dataset=iris, binary=False) + + # print classification report with label detection + expected_report = """\ + precision recall f1-score support + + 0 0.83 0.79 0.81 24 + 1 0.33 0.10 0.15 31 + 2 0.42 0.90 0.57 20 + + accuracy 0.53 75 + macro avg 0.53 0.60 0.51 75 +weighted 
avg 0.51 0.53 0.47 75 +""" + report = classification_report(y_true, y_pred) + assert report == expected_report + + +def test_classification_report_multiclass_with_digits(): + # Test performance report with added digits in floating point values + iris = datasets.load_iris() + y_true, y_pred, _ = make_prediction(dataset=iris, binary=False) + + # print classification report with class names + expected_report = """\ + precision recall f1-score support + + setosa 0.82609 0.79167 0.80851 24 + versicolor 0.33333 0.09677 0.15000 31 + virginica 0.41860 0.90000 0.57143 20 + + accuracy 0.53333 75 + macro avg 0.52601 0.59615 0.50998 75 +weighted avg 0.51375 0.53333 0.47310 75 +""" + report = classification_report( + y_true, + y_pred, + labels=np.arange(len(iris.target_names)), + target_names=iris.target_names, + digits=5, + ) + assert report == expected_report + + +def test_classification_report_multiclass_with_string_label(): + y_true, y_pred, _ = make_prediction(binary=False) + + y_true = np.array(["blue", "green", "red"])[y_true] + y_pred = np.array(["blue", "green", "red"])[y_pred] + + expected_report = """\ + precision recall f1-score support + + blue 0.83 0.79 0.81 24 + green 0.33 0.10 0.15 31 + red 0.42 0.90 0.57 20 + + accuracy 0.53 75 + macro avg 0.53 0.60 0.51 75 +weighted avg 0.51 0.53 0.47 75 +""" + report = classification_report(y_true, y_pred) + assert report == expected_report + + expected_report = """\ + precision recall f1-score support + + a 0.83 0.79 0.81 24 + b 0.33 0.10 0.15 31 + c 0.42 0.90 0.57 20 + + accuracy 0.53 75 + macro avg 0.53 0.60 0.51 75 +weighted avg 0.51 0.53 0.47 75 +""" + report = classification_report(y_true, y_pred, target_names=["a", "b", "c"]) + assert report == expected_report + + +def test_classification_report_multiclass_with_unicode_label(): + y_true, y_pred, _ = make_prediction(binary=False) + + labels = np.array(["blue\xa2", "green\xa2", "red\xa2"]) + y_true = labels[y_true] + y_pred = labels[y_pred] + + expected_report = """\ + precision recall f1-score support + + blue\xa2 0.83 0.79 0.81 24 + green\xa2 0.33 0.10 0.15 31 + red\xa2 0.42 0.90 0.57 20 + + accuracy 0.53 75 + macro avg 0.53 0.60 0.51 75 +weighted avg 0.51 0.53 0.47 75 +""" + report = classification_report(y_true, y_pred) + assert report == expected_report + + +def test_classification_report_multiclass_with_long_string_label(): + y_true, y_pred, _ = make_prediction(binary=False) + + labels = np.array(["blue", "green" * 5, "red"]) + y_true = labels[y_true] + y_pred = labels[y_pred] + + expected_report = """\ + precision recall f1-score support + + blue 0.83 0.79 0.81 24 +greengreengreengreengreen 0.33 0.10 0.15 31 + red 0.42 0.90 0.57 20 + + accuracy 0.53 75 + macro avg 0.53 0.60 0.51 75 + weighted avg 0.51 0.53 0.47 75 +""" + + report = classification_report(y_true, y_pred) + assert report == expected_report + + +def test_classification_report_labels_target_names_unequal_length(): + y_true = [0, 0, 2, 0, 0] + y_pred = [0, 2, 2, 0, 0] + target_names = ["class 0", "class 1", "class 2"] + + msg = "labels size, 2, does not match size of target_names, 3" + with pytest.warns(UserWarning, match=msg): + classification_report(y_true, y_pred, labels=[0, 2], target_names=target_names) + + +def test_classification_report_no_labels_target_names_unequal_length(): + y_true = [0, 0, 2, 0, 0] + y_pred = [0, 2, 2, 0, 0] + target_names = ["class 0", "class 1", "class 2"] + + err_msg = ( + "Number of classes, 2, does not " + "match size of target_names, 3. 
" + "Try specifying the labels parameter" + ) + with pytest.raises(ValueError, match=err_msg): + classification_report(y_true, y_pred, target_names=target_names) + + +@ignore_warnings +def test_multilabel_classification_report(): + n_classes = 4 + n_samples = 50 + + _, y_true = make_multilabel_classification( + n_features=1, n_samples=n_samples, n_classes=n_classes, random_state=0 + ) + + _, y_pred = make_multilabel_classification( + n_features=1, n_samples=n_samples, n_classes=n_classes, random_state=1 + ) + + expected_report = """\ + precision recall f1-score support + + 0 0.50 0.67 0.57 24 + 1 0.51 0.74 0.61 27 + 2 0.29 0.08 0.12 26 + 3 0.52 0.56 0.54 27 + + micro avg 0.50 0.51 0.50 104 + macro avg 0.45 0.51 0.46 104 +weighted avg 0.45 0.51 0.46 104 + samples avg 0.46 0.42 0.40 104 +""" + + report = classification_report(y_true, y_pred) + assert report == expected_report + + +def test_multilabel_zero_one_loss_subset(): + # Dense label indicator matrix format + y1 = np.array([[0, 1, 1], [1, 0, 1]]) + y2 = np.array([[0, 0, 1], [1, 0, 1]]) + + assert zero_one_loss(y1, y2) == 0.5 + assert zero_one_loss(y1, y1) == 0 + assert zero_one_loss(y2, y2) == 0 + assert zero_one_loss(y2, np.logical_not(y2)) == 1 + assert zero_one_loss(y1, np.logical_not(y1)) == 1 + assert zero_one_loss(y1, np.zeros(y1.shape)) == 1 + assert zero_one_loss(y2, np.zeros(y1.shape)) == 1 + + +def test_multilabel_hamming_loss(): + # Dense label indicator matrix format + y1 = np.array([[0, 1, 1], [1, 0, 1]]) + y2 = np.array([[0, 0, 1], [1, 0, 1]]) + w = np.array([1, 3]) + + assert hamming_loss(y1, y2) == 1 / 6 + assert hamming_loss(y1, y1) == 0 + assert hamming_loss(y2, y2) == 0 + assert hamming_loss(y2, 1 - y2) == 1 + assert hamming_loss(y1, 1 - y1) == 1 + assert hamming_loss(y1, np.zeros(y1.shape)) == 4 / 6 + assert hamming_loss(y2, np.zeros(y1.shape)) == 0.5 + assert hamming_loss(y1, y2, sample_weight=w) == 1.0 / 12 + assert hamming_loss(y1, 1 - y2, sample_weight=w) == 11.0 / 12 + assert hamming_loss(y1, np.zeros_like(y1), sample_weight=w) == 2.0 / 3 + # sp_hamming only works with 1-D arrays + assert hamming_loss(y1[0], y2[0]) == sp_hamming(y1[0], y2[0]) + + +def test_jaccard_score_validation(): + y_true = np.array([0, 1, 0, 1, 1]) + y_pred = np.array([0, 1, 0, 1, 1]) + err_msg = r"pos_label=2 is not a valid label. It should be one of \[0, 1\]" + with pytest.raises(ValueError, match=err_msg): + jaccard_score(y_true, y_pred, average="binary", pos_label=2) + + y_true = np.array([[0, 1, 1], [1, 0, 0]]) + y_pred = np.array([[1, 1, 1], [1, 0, 1]]) + msg1 = ( + r"Target is multilabel-indicator but average='binary'. " + r"Please choose another average setting, one of \[None, " + r"'micro', 'macro', 'weighted', 'samples'\]." + ) + with pytest.raises(ValueError, match=msg1): + jaccard_score(y_true, y_pred, average="binary", pos_label=-1) + + y_true = np.array([0, 1, 1, 0, 2]) + y_pred = np.array([1, 1, 1, 1, 0]) + msg2 = ( + r"Target is multiclass but average='binary'. Please choose " + r"another average setting, one of \[None, 'micro', 'macro', " + r"'weighted'\]." + ) + with pytest.raises(ValueError, match=msg2): + jaccard_score(y_true, y_pred, average="binary") + msg3 = "Samplewise metrics are not available outside of multilabel classification." + with pytest.raises(ValueError, match=msg3): + jaccard_score(y_true, y_pred, average="samples") + + msg = ( + r"Note that pos_label \(set to 3\) is ignored when " + r"average != 'binary' \(got 'micro'\). You may use " + r"labels=\[pos_label\] to specify a single positive " + "class." 
+ ) + with pytest.warns(UserWarning, match=msg): + jaccard_score(y_true, y_pred, average="micro", pos_label=3) + + +def test_multilabel_jaccard_score(recwarn): + # Dense label indicator matrix format + y1 = np.array([[0, 1, 1], [1, 0, 1]]) + y2 = np.array([[0, 0, 1], [1, 0, 1]]) + + # size(y1 \inter y2) = [1, 2] + # size(y1 \union y2) = [2, 2] + + assert jaccard_score(y1, y2, average="samples") == 0.75 + assert jaccard_score(y1, y1, average="samples") == 1 + assert jaccard_score(y2, y2, average="samples") == 1 + assert jaccard_score(y2, np.logical_not(y2), average="samples") == 0 + assert jaccard_score(y1, np.logical_not(y1), average="samples") == 0 + assert jaccard_score(y1, np.zeros(y1.shape), average="samples") == 0 + assert jaccard_score(y2, np.zeros(y1.shape), average="samples") == 0 + + y_true = np.array([[0, 1, 1], [1, 0, 0]]) + y_pred = np.array([[1, 1, 1], [1, 0, 1]]) + # average='macro' + assert_almost_equal(jaccard_score(y_true, y_pred, average="macro"), 2.0 / 3) + # average='micro' + assert_almost_equal(jaccard_score(y_true, y_pred, average="micro"), 3.0 / 5) + # average='samples' + assert_almost_equal(jaccard_score(y_true, y_pred, average="samples"), 7.0 / 12) + assert_almost_equal( + jaccard_score(y_true, y_pred, average="samples", labels=[0, 2]), 1.0 / 2 + ) + assert_almost_equal( + jaccard_score(y_true, y_pred, average="samples", labels=[1, 2]), 1.0 / 2 + ) + # average=None + assert_array_equal( + jaccard_score(y_true, y_pred, average=None), np.array([1.0 / 2, 1.0, 1.0 / 2]) + ) + + y_true = np.array([[0, 1, 1], [1, 0, 1]]) + y_pred = np.array([[1, 1, 1], [1, 0, 1]]) + assert_almost_equal(jaccard_score(y_true, y_pred, average="macro"), 5.0 / 6) + # average='weighted' + assert_almost_equal(jaccard_score(y_true, y_pred, average="weighted"), 7.0 / 8) + + msg2 = "Got 4 > 2" + with pytest.raises(ValueError, match=msg2): + jaccard_score(y_true, y_pred, labels=[4], average="macro") + msg3 = "Got -1 < 0" + with pytest.raises(ValueError, match=msg3): + jaccard_score(y_true, y_pred, labels=[-1], average="macro") + + msg = ( + "Jaccard is ill-defined and being set to 0.0 in labels " + "with no true or predicted samples." + ) + + with pytest.warns(UndefinedMetricWarning, match=msg): + assert ( + jaccard_score(np.array([[0, 1]]), np.array([[0, 1]]), average="macro") + == 0.5 + ) + + msg = ( + "Jaccard is ill-defined and being set to 0.0 in samples " + "with no true or predicted labels." 
+ ) + + with pytest.warns(UndefinedMetricWarning, match=msg): + assert ( + jaccard_score( + np.array([[0, 0], [1, 1]]), + np.array([[0, 0], [1, 1]]), + average="samples", + ) + == 0.5 + ) + + assert not list(recwarn) + + +def test_multiclass_jaccard_score(recwarn): + y_true = ["ant", "ant", "cat", "cat", "ant", "cat", "bird", "bird"] + y_pred = ["cat", "ant", "cat", "cat", "ant", "bird", "bird", "cat"] + labels = ["ant", "bird", "cat"] + lb = LabelBinarizer() + lb.fit(labels) + y_true_bin = lb.transform(y_true) + y_pred_bin = lb.transform(y_pred) + multi_jaccard_score = partial(jaccard_score, y_true, y_pred) + bin_jaccard_score = partial(jaccard_score, y_true_bin, y_pred_bin) + multi_labels_list = [ + ["ant", "bird"], + ["ant", "cat"], + ["cat", "bird"], + ["ant"], + ["bird"], + ["cat"], + None, + ] + bin_labels_list = [[0, 1], [0, 2], [2, 1], [0], [1], [2], None] + + # other than average='samples'/'none-samples', test everything else here + for average in ("macro", "weighted", "micro", None): + for m_label, b_label in zip(multi_labels_list, bin_labels_list): + assert_almost_equal( + multi_jaccard_score(average=average, labels=m_label), + bin_jaccard_score(average=average, labels=b_label), + ) + + y_true = np.array([[0, 0], [0, 0], [0, 0]]) + y_pred = np.array([[0, 0], [0, 0], [0, 0]]) + with ignore_warnings(): + assert jaccard_score(y_true, y_pred, average="weighted") == 0 + + assert not list(recwarn) + + +def test_average_binary_jaccard_score(recwarn): + # tp=0, fp=0, fn=1, tn=0 + assert jaccard_score([1], [0], average="binary") == 0.0 + # tp=0, fp=0, fn=0, tn=1 + msg = ( + "Jaccard is ill-defined and being set to 0.0 due to " + "no true or predicted samples" + ) + with pytest.warns(UndefinedMetricWarning, match=msg): + assert jaccard_score([0, 0], [0, 0], average="binary") == 0.0 + + # tp=1, fp=0, fn=0, tn=0 (pos_label=0) + assert jaccard_score([0], [0], pos_label=0, average="binary") == 1.0 + y_true = np.array([1, 0, 1, 1, 0]) + y_pred = np.array([1, 0, 1, 1, 1]) + assert_almost_equal(jaccard_score(y_true, y_pred, average="binary"), 3.0 / 4) + assert_almost_equal( + jaccard_score(y_true, y_pred, average="binary", pos_label=0), 1.0 / 2 + ) + + assert not list(recwarn) + + +def test_jaccard_score_zero_division_warning(): + # check that we raised a warning with default behavior if a zero division + # happens + y_true = np.array([[1, 0, 1], [0, 0, 0]]) + y_pred = np.array([[0, 0, 0], [0, 0, 0]]) + msg = ( + "Jaccard is ill-defined and being set to 0.0 in " + "samples with no true or predicted labels." + " Use `zero_division` parameter to control this behavior." 
+ ) + with pytest.warns(UndefinedMetricWarning, match=msg): + score = jaccard_score(y_true, y_pred, average="samples", zero_division="warn") + assert score == pytest.approx(0.0) + + +@pytest.mark.parametrize("zero_division, expected_score", [(0, 0), (1, 0.5)]) +def test_jaccard_score_zero_division_set_value(zero_division, expected_score): + # check that we don't issue warning by passing the zero_division parameter + y_true = np.array([[1, 0, 1], [0, 0, 0]]) + y_pred = np.array([[0, 0, 0], [0, 0, 0]]) + with warnings.catch_warnings(): + warnings.simplefilter("error", UndefinedMetricWarning) + score = jaccard_score( + y_true, y_pred, average="samples", zero_division=zero_division + ) + assert score == pytest.approx(expected_score) + + +@ignore_warnings +def test_precision_recall_f1_score_multilabel_1(): + # Test precision_recall_f1_score on a crafted multilabel example + # First crafted example + + y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]]) + y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]]) + + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) + + # tp = [0, 1, 1, 0] + # fn = [1, 0, 0, 1] + # fp = [1, 1, 0, 0] + # Check per class + + assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2) + assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2) + assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2) + assert_array_almost_equal(s, [1, 1, 1, 1], 2) + + f2 = fbeta_score(y_true, y_pred, beta=2, average=None) + support = s + assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2) + + # Check macro + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") + assert_almost_equal(p, 1.5 / 4) + assert_almost_equal(r, 0.5) + assert_almost_equal(f, 2.5 / 1.5 * 0.25) + assert s is None + assert_almost_equal( + fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2) + ) + + # Check micro + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") + assert_almost_equal(p, 0.5) + assert_almost_equal(r, 0.5) + assert_almost_equal(f, 0.5) + assert s is None + assert_almost_equal( + fbeta_score(y_true, y_pred, beta=2, average="micro"), + (1 + 4) * p * r / (4 * p + r), + ) + + # Check weighted + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") + assert_almost_equal(p, 1.5 / 4) + assert_almost_equal(r, 0.5) + assert_almost_equal(f, 2.5 / 1.5 * 0.25) + assert s is None + assert_almost_equal( + fbeta_score(y_true, y_pred, beta=2, average="weighted"), + np.average(f2, weights=support), + ) + # Check samples + # |h(x_i) inter y_i | = [0, 1, 1] + # |y_i| = [1, 1, 2] + # |h(x_i)| = [1, 1, 2] + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") + assert_almost_equal(p, 0.5) + assert_almost_equal(r, 0.5) + assert_almost_equal(f, 0.5) + assert s is None + assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.5) + + +@ignore_warnings +def test_precision_recall_f1_score_multilabel_2(): + # Test precision_recall_f1_score on a crafted multilabel example 2 + # Second crafted example + y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]]) + y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]]) + + # tp = [ 0. 1. 0. 0.] + # fp = [ 1. 0. 0. 2.] + # fn = [ 1. 1. 1. 0.] 
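+    # Editor's note (illustrative sketch, not part of the upstream test): in
+    # the multilabel-indicator case every column is scored as an independent
+    # binary problem, so the counts quoted above follow directly from the
+    # indicator matrices.
+    tp = np.logical_and(y_true, y_pred).sum(axis=0)
+    fp = np.logical_and(1 - y_true, y_pred).sum(axis=0)
+    fn = np.logical_and(y_true, 1 - y_pred).sum(axis=0)
+    assert_array_equal(tp, [0, 1, 0, 0])
+    assert_array_equal(fp, [1, 0, 0, 2])
+    assert_array_equal(fn, [1, 1, 1, 0])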
+ + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) + assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2) + assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2) + assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2) + assert_array_almost_equal(s, [1, 2, 1, 0], 2) + + f2 = fbeta_score(y_true, y_pred, beta=2, average=None) + support = s + assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2) + + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") + assert_almost_equal(p, 0.25) + assert_almost_equal(r, 0.25) + assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5) + assert s is None + assert_almost_equal( + fbeta_score(y_true, y_pred, beta=2, average="micro"), + (1 + 4) * p * r / (4 * p + r), + ) + + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") + assert_almost_equal(p, 0.25) + assert_almost_equal(r, 0.125) + assert_almost_equal(f, 2 / 12) + assert s is None + assert_almost_equal( + fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2) + ) + + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") + assert_almost_equal(p, 2 / 4) + assert_almost_equal(r, 1 / 4) + assert_almost_equal(f, 2 / 3 * 2 / 4) + assert s is None + assert_almost_equal( + fbeta_score(y_true, y_pred, beta=2, average="weighted"), + np.average(f2, weights=support), + ) + + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") + # Check samples + # |h(x_i) inter y_i | = [0, 0, 1] + # |y_i| = [1, 1, 2] + # |h(x_i)| = [1, 1, 2] + + assert_almost_equal(p, 1 / 6) + assert_almost_equal(r, 1 / 6) + assert_almost_equal(f, 2 / 4 * 1 / 3) + assert s is None + assert_almost_equal( + fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.1666, 2 + ) + + +@ignore_warnings +@pytest.mark.parametrize( + "zero_division, zero_division_expected", + [("warn", 0), (0, 0), (1, 1), (np.nan, np.nan)], +) +def test_precision_recall_f1_score_with_an_empty_prediction( + zero_division, zero_division_expected +): + y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]]) + y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]]) + + # true_pos = [ 0. 1. 1. 0.] + # false_pos = [ 0. 0. 0. 1.] + # false_neg = [ 1. 1. 0. 0.] 
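+    # Editor's note (illustrative sketch, not part of the upstream test):
+    # label 0 is never predicted (tp + fp == 0) so its precision is undefined,
+    # and label 3 never occurs in y_true (tp + fn == 0) so its recall is
+    # undefined; both fall back to the parametrized `zero_division` value,
+    # which is what `zero_division_expected` encodes below.
+    assert y_pred[:, 0].sum() == 0
+    assert y_true[:, 3].sum() == 0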
+ + p, r, f, s = precision_recall_fscore_support( + y_true, y_pred, average=None, zero_division=zero_division + ) + + assert_array_almost_equal(p, [zero_division_expected, 1.0, 1.0, 0.0], 2) + assert_array_almost_equal(r, [0.0, 0.5, 1.0, zero_division_expected], 2) + expected_f = 0 + assert_array_almost_equal(f, [expected_f, 1 / 1.5, 1, expected_f], 2) + assert_array_almost_equal(s, [1, 2, 1, 0], 2) + + f2 = fbeta_score(y_true, y_pred, beta=2, average=None, zero_division=zero_division) + support = s + assert_array_almost_equal(f2, [expected_f, 0.55, 1, expected_f], 2) + + p, r, f, s = precision_recall_fscore_support( + y_true, y_pred, average="macro", zero_division=zero_division + ) + + value_to_sum = 0 if np.isnan(zero_division_expected) else zero_division_expected + values_to_average = 3 + (not np.isnan(zero_division_expected)) + + assert_almost_equal(p, (2 + value_to_sum) / values_to_average) + assert_almost_equal(r, (1.5 + value_to_sum) / values_to_average) + expected_f = (2 / 3 + 1) / 4 + assert_almost_equal(f, expected_f) + assert s is None + assert_almost_equal( + fbeta_score( + y_true, + y_pred, + beta=2, + average="macro", + zero_division=zero_division, + ), + _nanaverage(f2, weights=None), + ) + + p, r, f, s = precision_recall_fscore_support( + y_true, y_pred, average="micro", zero_division=zero_division + ) + assert_almost_equal(p, 2 / 3) + assert_almost_equal(r, 0.5) + assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5)) + assert s is None + assert_almost_equal( + fbeta_score( + y_true, y_pred, beta=2, average="micro", zero_division=zero_division + ), + (1 + 4) * p * r / (4 * p + r), + ) + + p, r, f, s = precision_recall_fscore_support( + y_true, y_pred, average="weighted", zero_division=zero_division + ) + assert_almost_equal(p, 3 / 4 if zero_division_expected == 0 else 1.0) + assert_almost_equal(r, 0.5) + values_to_average = 4 + assert_almost_equal(f, (2 * 2 / 3 + 1) / values_to_average) + assert s is None + assert_almost_equal( + fbeta_score( + y_true, y_pred, beta=2, average="weighted", zero_division=zero_division + ), + _nanaverage(f2, weights=support), + ) + + p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") + # |h(x_i) inter y_i | = [0, 0, 2] + # |y_i| = [1, 1, 2] + # |h(x_i)| = [0, 1, 2] + assert_almost_equal(p, 1 / 3) + assert_almost_equal(r, 1 / 3) + assert_almost_equal(f, 1 / 3) + assert s is None + expected_result = 0.333 + assert_almost_equal( + fbeta_score( + y_true, y_pred, beta=2, average="samples", zero_division=zero_division + ), + expected_result, + 2, + ) + + +@pytest.mark.parametrize("beta", [1]) +@pytest.mark.parametrize("average", ["macro", "micro", "weighted", "samples"]) +@pytest.mark.parametrize("zero_division", [0, 1, np.nan]) +def test_precision_recall_f1_no_labels(beta, average, zero_division): + y_true = np.zeros((20, 3)) + y_pred = np.zeros_like(y_true) + + p, r, f, s = assert_no_warnings( + precision_recall_fscore_support, + y_true, + y_pred, + average=average, + beta=beta, + zero_division=zero_division, + ) + fbeta = assert_no_warnings( + fbeta_score, + y_true, + y_pred, + beta=beta, + average=average, + zero_division=zero_division, + ) + assert s is None + + # if zero_division = nan, check that all metrics are nan and exit + if np.isnan(zero_division): + for metric in [p, r, f, fbeta]: + assert np.isnan(metric) + return + + zero_division = float(zero_division) + assert_almost_equal(p, zero_division) + assert_almost_equal(r, zero_division) + assert_almost_equal(f, zero_division) + + assert_almost_equal(fbeta, 
float(zero_division)) + + +@pytest.mark.parametrize("average", ["macro", "micro", "weighted", "samples"]) +def test_precision_recall_f1_no_labels_check_warnings(average): + y_true = np.zeros((20, 3)) + y_pred = np.zeros_like(y_true) + + func = precision_recall_fscore_support + with pytest.warns(UndefinedMetricWarning): + p, r, f, s = func(y_true, y_pred, average=average, beta=1.0) + + assert_almost_equal(p, 0) + assert_almost_equal(r, 0) + assert_almost_equal(f, 0) + assert s is None + + with pytest.warns(UndefinedMetricWarning): + fbeta = fbeta_score(y_true, y_pred, average=average, beta=1.0) + + assert_almost_equal(fbeta, 0) + + +@pytest.mark.parametrize("zero_division", [0, 1, np.nan]) +def test_precision_recall_f1_no_labels_average_none(zero_division): + y_true = np.zeros((20, 3)) + y_pred = np.zeros_like(y_true) + + # tp = [0, 0, 0] + # fn = [0, 0, 0] + # fp = [0, 0, 0] + # support = [0, 0, 0] + # |y_hat_i inter y_i | = [0, 0, 0] + # |y_i| = [0, 0, 0] + # |y_hat_i| = [0, 0, 0] + + p, r, f, s = assert_no_warnings( + precision_recall_fscore_support, + y_true, + y_pred, + average=None, + beta=1.0, + zero_division=zero_division, + ) + fbeta = assert_no_warnings( + fbeta_score, y_true, y_pred, beta=1.0, average=None, zero_division=zero_division + ) + zero_division = np.float64(zero_division) + assert_array_almost_equal(p, [zero_division, zero_division, zero_division], 2) + assert_array_almost_equal(r, [zero_division, zero_division, zero_division], 2) + assert_array_almost_equal(f, [zero_division, zero_division, zero_division], 2) + assert_array_almost_equal(s, [0, 0, 0], 2) + + assert_array_almost_equal(fbeta, [zero_division, zero_division, zero_division], 2) + + +def test_precision_recall_f1_no_labels_average_none_warn(): + y_true = np.zeros((20, 3)) + y_pred = np.zeros_like(y_true) + + # tp = [0, 0, 0] + # fn = [0, 0, 0] + # fp = [0, 0, 0] + # support = [0, 0, 0] + # |y_hat_i inter y_i | = [0, 0, 0] + # |y_i| = [0, 0, 0] + # |y_hat_i| = [0, 0, 0] + + with pytest.warns(UndefinedMetricWarning): + p, r, f, s = precision_recall_fscore_support( + y_true, y_pred, average=None, beta=1 + ) + + assert_array_almost_equal(p, [0, 0, 0], 2) + assert_array_almost_equal(r, [0, 0, 0], 2) + assert_array_almost_equal(f, [0, 0, 0], 2) + assert_array_almost_equal(s, [0, 0, 0], 2) + + with pytest.warns(UndefinedMetricWarning): + fbeta = fbeta_score(y_true, y_pred, beta=1, average=None) + + assert_array_almost_equal(fbeta, [0, 0, 0], 2) + + +def test_prf_warnings(): + # average of per-label scores + f, w = precision_recall_fscore_support, UndefinedMetricWarning + for average in [None, "weighted", "macro"]: + msg = ( + "Precision is ill-defined and " + "being set to 0.0 in labels with no predicted samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f([0, 1, 2], [1, 1, 2], average=average) + + msg = ( + "Recall is ill-defined and " + "being set to 0.0 in labels with no true samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f([1, 1, 2], [0, 1, 2], average=average) + + # average of per-sample scores + msg = ( + "Precision is ill-defined and " + "being set to 0.0 in samples with no predicted labels." + " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f(np.array([[1, 0], [1, 0]]), np.array([[1, 0], [0, 0]]), average="samples") + + msg = ( + "Recall is ill-defined and " + "being set to 0.0 in samples with no true labels." 
+ " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f(np.array([[1, 0], [0, 0]]), np.array([[1, 0], [1, 0]]), average="samples") + + # single score: micro-average + msg = ( + "Precision is ill-defined and " + "being set to 0.0 due to no predicted samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f(np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average="micro") + + msg = ( + "Recall is ill-defined and " + "being set to 0.0 due to no true samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f(np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average="micro") + + # single positive label + msg = ( + "Precision is ill-defined and " + "being set to 0.0 due to no predicted samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f([1, 1], [-1, -1], average="binary") + + msg = ( + "Recall is ill-defined and " + "being set to 0.0 due to no true samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + with pytest.warns(w, match=msg): + f([-1, -1], [1, 1], average="binary") + + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + precision_recall_fscore_support([0, 0], [0, 0], average="binary") + msg = ( + "F-score is ill-defined and being set to 0.0 due to no true nor " + "predicted samples. Use `zero_division` parameter to control this" + " behavior." + ) + assert str(record.pop().message) == msg + msg = ( + "Recall is ill-defined and " + "being set to 0.0 due to no true samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + assert str(record.pop().message) == msg + msg = ( + "Precision is ill-defined and " + "being set to 0.0 due to no predicted samples." + " Use `zero_division` parameter to control" + " this behavior." 
+ ) + assert str(record.pop().message) == msg + + +@pytest.mark.parametrize("zero_division", [0, 1, np.nan]) +def test_prf_no_warnings_if_zero_division_set(zero_division): + # average of per-label scores + f = precision_recall_fscore_support + for average in [None, "weighted", "macro"]: + assert_no_warnings( + f, [0, 1, 2], [1, 1, 2], average=average, zero_division=zero_division + ) + + assert_no_warnings( + f, [1, 1, 2], [0, 1, 2], average=average, zero_division=zero_division + ) + + # average of per-sample scores + assert_no_warnings( + f, + np.array([[1, 0], [1, 0]]), + np.array([[1, 0], [0, 0]]), + average="samples", + zero_division=zero_division, + ) + + assert_no_warnings( + f, + np.array([[1, 0], [0, 0]]), + np.array([[1, 0], [1, 0]]), + average="samples", + zero_division=zero_division, + ) + + # single score: micro-average + assert_no_warnings( + f, + np.array([[1, 1], [1, 1]]), + np.array([[0, 0], [0, 0]]), + average="micro", + zero_division=zero_division, + ) + + assert_no_warnings( + f, + np.array([[0, 0], [0, 0]]), + np.array([[1, 1], [1, 1]]), + average="micro", + zero_division=zero_division, + ) + + # single positive label + assert_no_warnings( + f, [1, 1], [-1, -1], average="binary", zero_division=zero_division + ) + + assert_no_warnings( + f, [-1, -1], [1, 1], average="binary", zero_division=zero_division + ) + + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + precision_recall_fscore_support( + [0, 0], [0, 0], average="binary", zero_division=zero_division + ) + assert len(record) == 0 + + +@pytest.mark.parametrize("zero_division", ["warn", 0, 1, np.nan]) +def test_recall_warnings(zero_division): + assert_no_warnings( + recall_score, + np.array([[1, 1], [1, 1]]), + np.array([[0, 0], [0, 0]]), + average="micro", + zero_division=zero_division, + ) + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + recall_score( + np.array([[0, 0], [0, 0]]), + np.array([[1, 1], [1, 1]]), + average="micro", + zero_division=zero_division, + ) + if zero_division == "warn": + assert ( + str(record.pop().message) + == "Recall is ill-defined and " + "being set to 0.0 due to no true samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + else: + assert len(record) == 0 + + recall_score([0, 0], [0, 0]) + if zero_division == "warn": + assert ( + str(record.pop().message) + == "Recall is ill-defined and " + "being set to 0.0 due to no true samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + + +@pytest.mark.parametrize("zero_division", ["warn", 0, 1, np.nan]) +def test_precision_warnings(zero_division): + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + precision_score( + np.array([[1, 1], [1, 1]]), + np.array([[0, 0], [0, 0]]), + average="micro", + zero_division=zero_division, + ) + if zero_division == "warn": + assert ( + str(record.pop().message) + == "Precision is ill-defined and " + "being set to 0.0 due to no predicted samples." + " Use `zero_division` parameter to control" + " this behavior." + ) + else: + assert len(record) == 0 + + precision_score([0, 0], [0, 0]) + if zero_division == "warn": + assert ( + str(record.pop().message) + == "Precision is ill-defined and " + "being set to 0.0 due to no predicted samples." + " Use `zero_division` parameter to control" + " this behavior." 
+ ) + + assert_no_warnings( + precision_score, + np.array([[0, 0], [0, 0]]), + np.array([[1, 1], [1, 1]]), + average="micro", + zero_division=zero_division, + ) + + +@pytest.mark.parametrize("zero_division", ["warn", 0, 1, np.nan]) +def test_fscore_warnings(zero_division): + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + + for score in [f1_score, partial(fbeta_score, beta=2)]: + score( + np.array([[1, 1], [1, 1]]), + np.array([[0, 0], [0, 0]]), + average="micro", + zero_division=zero_division, + ) + assert len(record) == 0 + + score( + np.array([[0, 0], [0, 0]]), + np.array([[1, 1], [1, 1]]), + average="micro", + zero_division=zero_division, + ) + assert len(record) == 0 + + score( + np.array([[0, 0], [0, 0]]), + np.array([[0, 0], [0, 0]]), + average="micro", + zero_division=zero_division, + ) + if zero_division == "warn": + assert ( + str(record.pop().message) + == "F-score is ill-defined and " + "being set to 0.0 due to no true nor predicted " + "samples. Use `zero_division` parameter to " + "control this behavior." + ) + else: + assert len(record) == 0 + + +def test_prf_average_binary_data_non_binary(): + # Error if user does not explicitly set non-binary average mode + y_true_mc = [1, 2, 3, 3] + y_pred_mc = [1, 2, 3, 1] + msg_mc = ( + r"Target is multiclass but average='binary'. Please " + r"choose another average setting, one of \[" + r"None, 'micro', 'macro', 'weighted'\]." + ) + y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]]) + y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) + msg_ind = ( + r"Target is multilabel-indicator but average='binary'. Please " + r"choose another average setting, one of \[" + r"None, 'micro', 'macro', 'weighted', 'samples'\]." + ) + + for y_true, y_pred, msg in [ + (y_true_mc, y_pred_mc, msg_mc), + (y_true_ind, y_pred_ind, msg_ind), + ]: + for metric in [ + precision_score, + recall_score, + f1_score, + partial(fbeta_score, beta=2), + ]: + with pytest.raises(ValueError, match=msg): + metric(y_true, y_pred) + + +def test__check_targets(): + # Check that _check_targets correctly merges target types, squeezes + # output and fails if input lengths differ. 
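As an aside, the merge-and-squeeze behaviour described in the comment above can be reproduced directly. A minimal sketch, assuming a local scikit-learn install; note that `_check_targets` is private API, so its import path is an implementation detail and may change between versions:

import numpy as np
from sklearn.metrics._classification import _check_targets

# Binary targets given in different layouts are merged to the "binary" type
# and the column vector is squeezed back to a 1d array.
merged_type, y1, y2 = _check_targets([0, 1, 1], np.array([[1], [0], [1]]))
print(merged_type, y2)  # binary [1 0 1]

# Inputs of different lengths are rejected.
try:
    _check_targets([0, 1, 1], [0, 1])
except ValueError as exc:
    print(exc)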
+ IND = "multilabel-indicator" + MC = "multiclass" + BIN = "binary" + CNT = "continuous" + MMC = "multiclass-multioutput" + MCN = "continuous-multioutput" + # all of length 3 + EXAMPLES = [ + (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])), + # must not be considered binary + (IND, np.array([[0, 1], [1, 0], [1, 1]])), + (MC, [2, 3, 1]), + (BIN, [0, 1, 1]), + (CNT, [0.0, 1.5, 1.0]), + (MC, np.array([[2], [3], [1]])), + (BIN, np.array([[0], [1], [1]])), + (CNT, np.array([[0.0], [1.5], [1.0]])), + (MMC, np.array([[0, 2], [1, 3], [2, 3]])), + (MCN, np.array([[0.5, 2.0], [1.1, 3.0], [2.0, 3.0]])), + ] + # expected type given input types, or None for error + # (types will be tried in either order) + EXPECTED = { + (IND, IND): IND, + (MC, MC): MC, + (BIN, BIN): BIN, + (MC, IND): None, + (BIN, IND): None, + (BIN, MC): MC, + # Disallowed types + (CNT, CNT): None, + (MMC, MMC): None, + (MCN, MCN): None, + (IND, CNT): None, + (MC, CNT): None, + (BIN, CNT): None, + (MMC, CNT): None, + (MCN, CNT): None, + (IND, MMC): None, + (MC, MMC): None, + (BIN, MMC): None, + (MCN, MMC): None, + (IND, MCN): None, + (MC, MCN): None, + (BIN, MCN): None, + } + + for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2): + try: + expected = EXPECTED[type1, type2] + except KeyError: + expected = EXPECTED[type2, type1] + if expected is None: + with pytest.raises(ValueError): + _check_targets(y1, y2) + + if type1 != type2: + err_msg = ( + "Classification metrics can't handle a mix " + "of {0} and {1} targets".format(type1, type2) + ) + with pytest.raises(ValueError, match=err_msg): + _check_targets(y1, y2) + + else: + if type1 not in (BIN, MC, IND): + err_msg = "{0} is not supported".format(type1) + with pytest.raises(ValueError, match=err_msg): + _check_targets(y1, y2) + + else: + merged_type, y1out, y2out = _check_targets(y1, y2) + assert merged_type == expected + if merged_type.startswith("multilabel"): + assert y1out.format == "csr" + assert y2out.format == "csr" + else: + assert_array_equal(y1out, np.squeeze(y1)) + assert_array_equal(y2out, np.squeeze(y2)) + with pytest.raises(ValueError): + _check_targets(y1[:-1], y2) + + # Make sure seq of seq is not supported + y1 = [(1, 2), (0, 2, 3)] + y2 = [(2,), (0, 2)] + msg = ( + "You appear to be using a legacy multi-label data representation. " + "Sequence of sequences are no longer supported; use a binary array" + " or sparse matrix instead - the MultiLabelBinarizer" + " transformer can convert to this format." 
+ ) + with pytest.raises(ValueError, match=msg): + _check_targets(y1, y2) + + +def test__check_targets_multiclass_with_both_y_true_and_y_pred_binary(): + # https://github.com/scikit-learn/scikit-learn/issues/8098 + y_true = [0, 1] + y_pred = [0, -1] + assert _check_targets(y_true, y_pred)[0] == "multiclass" + + +def test_hinge_loss_binary(): + y_true = np.array([-1, 1, 1, -1]) + pred_decision = np.array([-8.5, 0.5, 1.5, -0.3]) + assert hinge_loss(y_true, pred_decision) == 1.2 / 4 + + y_true = np.array([0, 2, 2, 0]) + pred_decision = np.array([-8.5, 0.5, 1.5, -0.3]) + assert hinge_loss(y_true, pred_decision) == 1.2 / 4 + + +def test_hinge_loss_multiclass(): + pred_decision = np.array( + [ + [+0.36, -0.17, -0.58, -0.99], + [-0.54, -0.37, -0.48, -0.58], + [-1.45, -0.58, -0.38, -0.17], + [-0.54, -0.38, -0.48, -0.58], + [-2.36, -0.79, -0.27, +0.24], + [-1.45, -0.58, -0.38, -0.17], + ] + ) + y_true = np.array([0, 1, 2, 1, 3, 2]) + dummy_losses = np.array( + [ + 1 - pred_decision[0][0] + pred_decision[0][1], + 1 - pred_decision[1][1] + pred_decision[1][2], + 1 - pred_decision[2][2] + pred_decision[2][3], + 1 - pred_decision[3][1] + pred_decision[3][2], + 1 - pred_decision[4][3] + pred_decision[4][2], + 1 - pred_decision[5][2] + pred_decision[5][3], + ] + ) + np.clip(dummy_losses, 0, None, out=dummy_losses) + dummy_hinge_loss = np.mean(dummy_losses) + assert hinge_loss(y_true, pred_decision) == dummy_hinge_loss + + +def test_hinge_loss_multiclass_missing_labels_with_labels_none(): + y_true = np.array([0, 1, 2, 2]) + pred_decision = np.array( + [ + [+1.27, 0.034, -0.68, -1.40], + [-1.45, -0.58, -0.38, -0.17], + [-2.36, -0.79, -0.27, +0.24], + [-2.36, -0.79, -0.27, +0.24], + ] + ) + error_message = ( + "Please include all labels in y_true or pass labels as third argument" + ) + with pytest.raises(ValueError, match=error_message): + hinge_loss(y_true, pred_decision) + + +def test_hinge_loss_multiclass_no_consistent_pred_decision_shape(): + # test for inconsistency between multiclass problem and pred_decision + # argument + y_true = np.array([2, 1, 0, 1, 0, 1, 1]) + pred_decision = np.array([0, 1, 2, 1, 0, 2, 1]) + error_message = ( + "The shape of pred_decision cannot be 1d array" + "with a multiclass target. pred_decision shape " + "must be (n_samples, n_classes), that is " + "(7, 3). Got: (7,)" + ) + with pytest.raises(ValueError, match=re.escape(error_message)): + hinge_loss(y_true=y_true, pred_decision=pred_decision) + + # test for inconsistency between pred_decision shape and labels number + pred_decision = np.array([[0, 1], [0, 1], [0, 1], [0, 1], [2, 0], [0, 1], [1, 0]]) + labels = [0, 1, 2] + error_message = ( + "The shape of pred_decision is not " + "consistent with the number of classes. " + "With a multiclass target, pred_decision " + "shape must be (n_samples, n_classes), that is " + "(7, 3). 
Got: (7, 2)" + ) + with pytest.raises(ValueError, match=re.escape(error_message)): + hinge_loss(y_true=y_true, pred_decision=pred_decision, labels=labels) + + +def test_hinge_loss_multiclass_with_missing_labels(): + pred_decision = np.array( + [ + [+0.36, -0.17, -0.58, -0.99], + [-0.55, -0.38, -0.48, -0.58], + [-1.45, -0.58, -0.38, -0.17], + [-0.55, -0.38, -0.48, -0.58], + [-1.45, -0.58, -0.38, -0.17], + ] + ) + y_true = np.array([0, 1, 2, 1, 2]) + labels = np.array([0, 1, 2, 3]) + dummy_losses = np.array( + [ + 1 - pred_decision[0][0] + pred_decision[0][1], + 1 - pred_decision[1][1] + pred_decision[1][2], + 1 - pred_decision[2][2] + pred_decision[2][3], + 1 - pred_decision[3][1] + pred_decision[3][2], + 1 - pred_decision[4][2] + pred_decision[4][3], + ] + ) + np.clip(dummy_losses, 0, None, out=dummy_losses) + dummy_hinge_loss = np.mean(dummy_losses) + assert hinge_loss(y_true, pred_decision, labels=labels) == dummy_hinge_loss + + +def test_hinge_loss_multiclass_missing_labels_only_two_unq_in_y_true(): + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/17630 + # check that we can compute the hinge loss when providing an array + # with labels allowing to not have all labels in y_true + pred_decision = np.array( + [ + [+0.36, -0.17, -0.58], + [-0.15, -0.58, -0.48], + [-1.45, -0.58, -0.38], + [-0.55, -0.78, -0.42], + [-1.45, -0.58, -0.38], + ] + ) + y_true = np.array([0, 2, 2, 0, 2]) + labels = np.array([0, 1, 2]) + dummy_losses = np.array( + [ + 1 - pred_decision[0][0] + pred_decision[0][1], + 1 - pred_decision[1][2] + pred_decision[1][0], + 1 - pred_decision[2][2] + pred_decision[2][1], + 1 - pred_decision[3][0] + pred_decision[3][2], + 1 - pred_decision[4][2] + pred_decision[4][1], + ] + ) + np.clip(dummy_losses, 0, None, out=dummy_losses) + dummy_hinge_loss = np.mean(dummy_losses) + assert_almost_equal( + hinge_loss(y_true, pred_decision, labels=labels), dummy_hinge_loss + ) + + +def test_hinge_loss_multiclass_invariance_lists(): + # Currently, invariance of string and integer labels cannot be tested + # in common invariance tests because invariance tests for multiclass + # decision functions is not implemented yet. 
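For reference, the quantity that the `dummy_losses` arrays in these tests rebuild by hand is, per sample, max(0, 1 - (decision value of the true class - highest decision value among the other classes)). A NumPy-only sketch of that computation on made-up data:

import numpy as np

pred_decision = np.array([[+0.36, -0.17, -0.58],
                          [-1.45, -0.58, -0.38]])
y_true = np.array([0, 2])

idx = np.arange(len(y_true))
true_scores = pred_decision[idx, y_true]
others = pred_decision.copy()
others[idx, y_true] = -np.inf           # mask the true class out of the max
margins = true_scores - others.max(axis=1)
print(np.mean(np.clip(1 - margins, 0, None)))  # mean multiclass hinge loss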
+ y_true = ["blue", "green", "red", "green", "white", "red"] + pred_decision = [ + [+0.36, -0.17, -0.58, -0.99], + [-0.55, -0.38, -0.48, -0.58], + [-1.45, -0.58, -0.38, -0.17], + [-0.55, -0.38, -0.48, -0.58], + [-2.36, -0.79, -0.27, +0.24], + [-1.45, -0.58, -0.38, -0.17], + ] + dummy_losses = np.array( + [ + 1 - pred_decision[0][0] + pred_decision[0][1], + 1 - pred_decision[1][1] + pred_decision[1][2], + 1 - pred_decision[2][2] + pred_decision[2][3], + 1 - pred_decision[3][1] + pred_decision[3][2], + 1 - pred_decision[4][3] + pred_decision[4][2], + 1 - pred_decision[5][2] + pred_decision[5][3], + ] + ) + np.clip(dummy_losses, 0, None, out=dummy_losses) + dummy_hinge_loss = np.mean(dummy_losses) + assert hinge_loss(y_true, pred_decision) == dummy_hinge_loss + + +def test_log_loss(): + # binary case with symbolic labels ("no" < "yes") + y_true = ["no", "no", "no", "yes", "yes", "yes"] + y_pred = np.array( + [[0.5, 0.5], [0.1, 0.9], [0.01, 0.99], [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]] + ) + loss = log_loss(y_true, y_pred) + loss_true = -np.mean(bernoulli.logpmf(np.array(y_true) == "yes", y_pred[:, 1])) + assert_almost_equal(loss, loss_true) + + # multiclass case; adapted from http://bit.ly/RJJHWA + y_true = [1, 0, 2] + y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]] + loss = log_loss(y_true, y_pred, normalize=True) + assert_almost_equal(loss, 0.6904911) + + # check that we got all the shapes and axes right + # by doubling the length of y_true and y_pred + y_true *= 2 + y_pred *= 2 + loss = log_loss(y_true, y_pred, normalize=False) + assert_almost_equal(loss, 0.6904911 * 6, decimal=6) + + user_warning_msg = "y_pred values do not sum to one" + # check eps and handling of absolute zero and one probabilities + y_pred = np.asarray(y_pred) > 0.5 + with pytest.warns(FutureWarning): + loss = log_loss(y_true, y_pred, normalize=True, eps=0.1) + with pytest.warns(UserWarning, match=user_warning_msg): + assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, 0.1, 0.9))) + + # binary case: check correct boundary values for eps = 0 + with pytest.warns(FutureWarning): + assert log_loss([0, 1], [0, 1], eps=0) == 0 + with pytest.warns(FutureWarning): + assert log_loss([0, 1], [0, 0], eps=0) == np.inf + with pytest.warns(FutureWarning): + assert log_loss([0, 1], [1, 1], eps=0) == np.inf + + # multiclass case: check correct boundary values for eps = 0 + with pytest.warns(FutureWarning): + assert log_loss([0, 1, 2], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], eps=0) == 0 + with pytest.warns(FutureWarning): + assert ( + log_loss([0, 1, 2], [[0, 0.5, 0.5], [0, 1, 0], [0, 0, 1]], eps=0) == np.inf + ) + + # raise error if number of classes are not equal. + y_true = [1, 0, 2] + y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]] + with pytest.raises(ValueError): + log_loss(y_true, y_pred) + + # case when y_true is a string array object + y_true = ["ham", "spam", "spam", "ham"] + y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]] + with pytest.warns(UserWarning, match=user_warning_msg): + loss = log_loss(y_true, y_pred) + assert_almost_equal(loss, 1.0383217, decimal=6) + + # test labels option + + y_true = [2, 2] + y_pred = [[0.2, 0.7], [0.6, 0.5]] + y_score = np.array([[0.1, 0.9], [0.1, 0.9]]) + error_str = ( + r"y_true contains only one label \(2\). Please provide " + r"the true labels explicitly through the labels argument." 
+ ) + with pytest.raises(ValueError, match=error_str): + log_loss(y_true, y_pred) + + y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]] + error_str = "Found input variables with inconsistent numbers of samples: [3, 2]" + (ValueError, error_str, log_loss, y_true, y_pred) + + # works when the labels argument is used + + true_log_loss = -np.mean(np.log(y_score[:, 1])) + calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2]) + assert_almost_equal(calculated_log_loss, true_log_loss) + + # ensure labels work when len(np.unique(y_true)) != y_pred.shape[1] + y_true = [1, 2, 2] + y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]] + with pytest.warns(UserWarning, match=user_warning_msg): + loss = log_loss(y_true, y_score2, labels=[1, 2, 3]) + assert_almost_equal(loss, 1.0630345, decimal=6) + + +def test_log_loss_eps_auto(global_dtype): + """Check the behaviour of `eps="auto"` that changes depending on the input + array dtype. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/24315 + """ + y_true = np.array([0, 1], dtype=global_dtype) + y_pred = y_true.copy() + + loss = log_loss(y_true, y_pred, eps="auto") + assert np.isfinite(loss) + + +def test_log_loss_eps_auto_float16(): + """Check the behaviour of `eps="auto"` for np.float16""" + y_true = np.array([0, 1], dtype=np.float16) + y_pred = y_true.copy() + + loss = log_loss(y_true, y_pred, eps="auto") + assert np.isfinite(loss) + + +def test_log_loss_pandas_input(): + # case when input is a pandas series and dataframe gh-5715 + y_tr = np.array(["ham", "spam", "spam", "ham"]) + y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]) + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TrueInputType, PredInputType in types: + # y_pred dataframe, y_true series + y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr) + with pytest.warns(UserWarning, match="y_pred values do not sum to one"): + loss = log_loss(y_true, y_pred) + assert_almost_equal(loss, 1.0383217, decimal=6) + + +def test_brier_score_loss(): + # Check brier_score_loss function + y_true = np.array([0, 1, 1, 0, 1, 1]) + y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1.0, 0.95]) + true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true) + + assert_almost_equal(brier_score_loss(y_true, y_true), 0.0) + assert_almost_equal(brier_score_loss(y_true, y_pred), true_score) + assert_almost_equal(brier_score_loss(1.0 + y_true, y_pred), true_score) + assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred), true_score) + with pytest.raises(ValueError): + brier_score_loss(y_true, y_pred[1:]) + with pytest.raises(ValueError): + brier_score_loss(y_true, y_pred + 1.0) + with pytest.raises(ValueError): + brier_score_loss(y_true, y_pred - 1.0) + + # ensure to raise an error for multiclass y_true + y_true = np.array([0, 1, 2, 0]) + y_pred = np.array([0.8, 0.6, 0.4, 0.2]) + error_message = ( + "Only binary classification is supported. 
The type of the target is multiclass" + ) + + with pytest.raises(ValueError, match=error_message): + brier_score_loss(y_true, y_pred) + + # calculate correctly when there's only one class in y_true + assert_almost_equal(brier_score_loss([-1], [0.4]), 0.16) + assert_almost_equal(brier_score_loss([0], [0.4]), 0.16) + assert_almost_equal(brier_score_loss([1], [0.4]), 0.36) + assert_almost_equal(brier_score_loss(["foo"], [0.4], pos_label="bar"), 0.16) + assert_almost_equal(brier_score_loss(["foo"], [0.4], pos_label="foo"), 0.36) + + +def test_balanced_accuracy_score_unseen(): + msg = "y_pred contains classes not in y_true" + with pytest.warns(UserWarning, match=msg): + balanced_accuracy_score([0, 0, 0], [0, 0, 1]) + + +@pytest.mark.parametrize( + "y_true,y_pred", + [ + (["a", "b", "a", "b"], ["a", "a", "a", "b"]), + (["a", "b", "c", "b"], ["a", "a", "a", "b"]), + (["a", "a", "a", "b"], ["a", "b", "c", "b"]), + ], +) +def test_balanced_accuracy_score(y_true, y_pred): + macro_recall = recall_score( + y_true, y_pred, average="macro", labels=np.unique(y_true) + ) + with ignore_warnings(): + # Warnings are tested in test_balanced_accuracy_score_unseen + balanced = balanced_accuracy_score(y_true, y_pred) + assert balanced == pytest.approx(macro_recall) + adjusted = balanced_accuracy_score(y_true, y_pred, adjusted=True) + chance = balanced_accuracy_score(y_true, np.full_like(y_true, y_true[0])) + assert adjusted == (balanced - chance) / (1 - chance) + + +@pytest.mark.parametrize( + "metric", + [ + jaccard_score, + f1_score, + partial(fbeta_score, beta=0.5), + precision_recall_fscore_support, + precision_score, + recall_score, + brier_score_loss, + ], +) +@pytest.mark.parametrize( + "classes", [(False, True), (0, 1), (0.0, 1.0), ("zero", "one")] +) +def test_classification_metric_pos_label_types(metric, classes): + """Check that the metric works with different types of `pos_label`. + + We can expect `pos_label` to be a bool, an integer, a float, a string. + No error should be raised for those types. + """ + rng = np.random.RandomState(42) + n_samples, pos_label = 10, classes[-1] + y_true = rng.choice(classes, size=n_samples, replace=True) + if metric is brier_score_loss: + # brier score loss requires probabilities + y_pred = rng.uniform(size=n_samples) + else: + y_pred = y_true.copy() + result = metric(y_true, y_pred, pos_label=pos_label) + assert not np.any(np.isnan(result)) + + +@pytest.mark.parametrize( + "y_true, y_pred, expected_score", + [ + (np.array([0, 1]), np.array([1, 0]), 0.0), + (np.array([0, 1]), np.array([0, 1]), 1.0), + (np.array([0, 1]), np.array([0, 0]), 0.0), + (np.array([0, 0]), np.array([0, 0]), 1.0), + ], +) +def test_f1_for_small_binary_inputs_with_zero_division(y_true, y_pred, expected_score): + """Check the behaviour of `zero_division` for f1-score. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/26965 + """ + assert f1_score(y_true, y_pred, zero_division=1.0) == pytest.approx(expected_score) + + +@pytest.mark.parametrize( + "scoring", + [ + make_scorer(f1_score, zero_division=np.nan), + make_scorer(fbeta_score, beta=2, zero_division=np.nan), + make_scorer(precision_score, zero_division=np.nan), + make_scorer(recall_score, zero_division=np.nan), + ], +) +def test_classification_metric_division_by_zero_nan_validaton(scoring): + """Check that we validate `np.nan` properly for classification metrics. 
+ + With `n_jobs=2` in cross-validation, the `np.nan` used for the singleton will be + different in the sub-process and we should not use the `is` operator but + `math.isnan`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27563 + """ + X, y = datasets.make_classification(random_state=0) + classifier = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y) + cross_val_score(classifier, X, y, scoring=scoring, n_jobs=2, error_score="raise") diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_common.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..8fad63870e4acd72c5dc07de29fedac4ef09ef36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_common.py @@ -0,0 +1,1839 @@ +from functools import partial +from inspect import signature +from itertools import chain, permutations, product + +import numpy as np +import pytest + +from sklearn._config import config_context +from sklearn.datasets import make_multilabel_classification +from sklearn.metrics import ( + accuracy_score, + average_precision_score, + balanced_accuracy_score, + brier_score_loss, + cohen_kappa_score, + confusion_matrix, + coverage_error, + d2_absolute_error_score, + d2_pinball_score, + d2_tweedie_score, + dcg_score, + det_curve, + explained_variance_score, + f1_score, + fbeta_score, + hamming_loss, + hinge_loss, + jaccard_score, + label_ranking_average_precision_score, + label_ranking_loss, + log_loss, + matthews_corrcoef, + max_error, + mean_absolute_error, + mean_absolute_percentage_error, + mean_gamma_deviance, + mean_pinball_loss, + mean_poisson_deviance, + mean_squared_error, + mean_tweedie_deviance, + median_absolute_error, + multilabel_confusion_matrix, + ndcg_score, + precision_recall_curve, + precision_score, + r2_score, + recall_score, + roc_auc_score, + roc_curve, + top_k_accuracy_score, + zero_one_loss, +) +from sklearn.metrics._base import _average_binary_score +from sklearn.preprocessing import LabelBinarizer +from sklearn.utils import shuffle +from sklearn.utils._array_api import ( + _atol_for_type, + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._testing import ( + _array_api_for_tests, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_array_less, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS +from sklearn.utils.multiclass import type_of_target +from sklearn.utils.validation import _num_samples, check_random_state + +# Note toward developers about metric testing +# ------------------------------------------- +# It is often possible to write one general test for several metrics: +# +# - invariance properties, e.g. invariance to sample order +# - common behavior for an argument, e.g. the "normalize" with value True +# will return the mean of the metrics and with value False will return +# the sum of the metrics. +# +# In order to improve the overall metric testing, it is a good idea to write +# first a specific test for the given metric and then add a general test for +# all metrics that have the same behavior. +# +# Two types of datastructures are used in order to implement this system: +# dictionaries of metrics and lists of metrics with common properties. 
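To make the `normalize` example in the note above concrete, here is a minimal sketch (assuming a scikit-learn install) of the relationship the general tests below assert, i.e. the unnormalized value equals the normalized value times the number of samples:

import numpy as np
from sklearn.metrics import accuracy_score, zero_one_loss

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])

# normalize=True (the default) averages over samples, normalize=False sums.
print(accuracy_score(y_true, y_pred), accuracy_score(y_true, y_pred, normalize=False))  # 0.8 4
print(zero_one_loss(y_true, y_pred), zero_one_loss(y_true, y_pred, normalize=False))    # 0.2 1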
+# +# Dictionaries of metrics +# ------------------------ +# The goal of having those dictionaries is to have an easy way to call a +# particular metric and associate a name to each function: +# +# - REGRESSION_METRICS: all regression metrics. +# - CLASSIFICATION_METRICS: all classification metrics +# which compare a ground truth and the estimated targets as returned by a +# classifier. +# - THRESHOLDED_METRICS: all classification metrics which +# compare a ground truth and a score, e.g. estimated probabilities or +# decision function (format might vary) +# +# Those dictionaries will be used to test systematically some invariance +# properties, e.g. invariance toward several input layout. +# + +REGRESSION_METRICS = { + "max_error": max_error, + "mean_absolute_error": mean_absolute_error, + "mean_squared_error": mean_squared_error, + "mean_pinball_loss": mean_pinball_loss, + "median_absolute_error": median_absolute_error, + "mean_absolute_percentage_error": mean_absolute_percentage_error, + "explained_variance_score": explained_variance_score, + "r2_score": partial(r2_score, multioutput="variance_weighted"), + "mean_normal_deviance": partial(mean_tweedie_deviance, power=0), + "mean_poisson_deviance": mean_poisson_deviance, + "mean_gamma_deviance": mean_gamma_deviance, + "mean_compound_poisson_deviance": partial(mean_tweedie_deviance, power=1.4), + "d2_tweedie_score": partial(d2_tweedie_score, power=1.4), + "d2_pinball_score": d2_pinball_score, + "d2_absolute_error_score": d2_absolute_error_score, +} + +CLASSIFICATION_METRICS = { + "accuracy_score": accuracy_score, + "balanced_accuracy_score": balanced_accuracy_score, + "adjusted_balanced_accuracy_score": partial(balanced_accuracy_score, adjusted=True), + "unnormalized_accuracy_score": partial(accuracy_score, normalize=False), + # `confusion_matrix` returns absolute values and hence behaves unnormalized + # . Naming it with an unnormalized_ prefix is necessary for this module to + # skip sample_weight scaling checks which will fail for unnormalized + # metrics. 
+ "unnormalized_confusion_matrix": confusion_matrix, + "normalized_confusion_matrix": lambda *args, **kwargs: ( + confusion_matrix(*args, **kwargs).astype("float") + / confusion_matrix(*args, **kwargs).sum(axis=1)[:, np.newaxis] + ), + "unnormalized_multilabel_confusion_matrix": multilabel_confusion_matrix, + "unnormalized_multilabel_confusion_matrix_sample": partial( + multilabel_confusion_matrix, samplewise=True + ), + "hamming_loss": hamming_loss, + "zero_one_loss": zero_one_loss, + "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False), + # These are needed to test averaging + "jaccard_score": jaccard_score, + "precision_score": precision_score, + "recall_score": recall_score, + "f1_score": f1_score, + "f2_score": partial(fbeta_score, beta=2), + "f0.5_score": partial(fbeta_score, beta=0.5), + "matthews_corrcoef_score": matthews_corrcoef, + "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5), + "weighted_f1_score": partial(f1_score, average="weighted"), + "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2), + "weighted_precision_score": partial(precision_score, average="weighted"), + "weighted_recall_score": partial(recall_score, average="weighted"), + "weighted_jaccard_score": partial(jaccard_score, average="weighted"), + "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5), + "micro_f1_score": partial(f1_score, average="micro"), + "micro_f2_score": partial(fbeta_score, average="micro", beta=2), + "micro_precision_score": partial(precision_score, average="micro"), + "micro_recall_score": partial(recall_score, average="micro"), + "micro_jaccard_score": partial(jaccard_score, average="micro"), + "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5), + "macro_f1_score": partial(f1_score, average="macro"), + "macro_f2_score": partial(fbeta_score, average="macro", beta=2), + "macro_precision_score": partial(precision_score, average="macro"), + "macro_recall_score": partial(recall_score, average="macro"), + "macro_jaccard_score": partial(jaccard_score, average="macro"), + "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5), + "samples_f1_score": partial(f1_score, average="samples"), + "samples_f2_score": partial(fbeta_score, average="samples", beta=2), + "samples_precision_score": partial(precision_score, average="samples"), + "samples_recall_score": partial(recall_score, average="samples"), + "samples_jaccard_score": partial(jaccard_score, average="samples"), + "cohen_kappa_score": cohen_kappa_score, +} + + +def precision_recall_curve_padded_thresholds(*args, **kwargs): + """ + The dimensions of precision-recall pairs and the threshold array as + returned by the precision_recall_curve do not match. See + func:`sklearn.metrics.precision_recall_curve` + + This prevents implicit conversion of return value triple to an higher + dimensional np.array of dtype('float64') (it will be of dtype('object) + instead). This again is needed for assert_array_equal to work correctly. + + As a workaround we pad the threshold array with NaN values to match + the dimension of precision and recall arrays respectively. 
+ """ + precision, recall, thresholds = precision_recall_curve(*args, **kwargs) + + pad_threshholds = len(precision) - len(thresholds) + + return np.array( + [ + precision, + recall, + np.pad( + thresholds.astype(np.float64), + pad_width=(0, pad_threshholds), + mode="constant", + constant_values=[np.nan], + ), + ] + ) + + +CURVE_METRICS = { + "roc_curve": roc_curve, + "precision_recall_curve": precision_recall_curve_padded_thresholds, + "det_curve": det_curve, +} + +THRESHOLDED_METRICS = { + "coverage_error": coverage_error, + "label_ranking_loss": label_ranking_loss, + "log_loss": log_loss, + "unnormalized_log_loss": partial(log_loss, normalize=False), + "hinge_loss": hinge_loss, + "brier_score_loss": brier_score_loss, + "roc_auc_score": roc_auc_score, # default: average="macro" + "weighted_roc_auc": partial(roc_auc_score, average="weighted"), + "samples_roc_auc": partial(roc_auc_score, average="samples"), + "micro_roc_auc": partial(roc_auc_score, average="micro"), + "ovr_roc_auc": partial(roc_auc_score, average="macro", multi_class="ovr"), + "weighted_ovr_roc_auc": partial( + roc_auc_score, average="weighted", multi_class="ovr" + ), + "ovo_roc_auc": partial(roc_auc_score, average="macro", multi_class="ovo"), + "weighted_ovo_roc_auc": partial( + roc_auc_score, average="weighted", multi_class="ovo" + ), + "partial_roc_auc": partial(roc_auc_score, max_fpr=0.5), + "average_precision_score": average_precision_score, # default: average="macro" + "weighted_average_precision_score": partial( + average_precision_score, average="weighted" + ), + "samples_average_precision_score": partial( + average_precision_score, average="samples" + ), + "micro_average_precision_score": partial(average_precision_score, average="micro"), + "label_ranking_average_precision_score": label_ranking_average_precision_score, + "ndcg_score": ndcg_score, + "dcg_score": dcg_score, + "top_k_accuracy_score": top_k_accuracy_score, +} + +ALL_METRICS = dict() +ALL_METRICS.update(THRESHOLDED_METRICS) +ALL_METRICS.update(CLASSIFICATION_METRICS) +ALL_METRICS.update(REGRESSION_METRICS) +ALL_METRICS.update(CURVE_METRICS) + +# Lists of metrics with common properties +# --------------------------------------- +# Lists of metrics with common properties are used to test systematically some +# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that +# are symmetric with respect to their input argument y_true and y_pred. +# +# When you add a new metric or functionality, check if a general test +# is already written. 
+ +# Those metrics don't support binary inputs +METRIC_UNDEFINED_BINARY = { + "samples_f0.5_score", + "samples_f1_score", + "samples_f2_score", + "samples_precision_score", + "samples_recall_score", + "samples_jaccard_score", + "coverage_error", + "unnormalized_multilabel_confusion_matrix_sample", + "label_ranking_loss", + "label_ranking_average_precision_score", + "dcg_score", + "ndcg_score", +} + +# Those metrics don't support multiclass inputs +METRIC_UNDEFINED_MULTICLASS = { + "brier_score_loss", + "micro_roc_auc", + "samples_roc_auc", + "partial_roc_auc", + "roc_auc_score", + "weighted_roc_auc", + "jaccard_score", + # with default average='binary', multiclass is prohibited + "precision_score", + "recall_score", + "f1_score", + "f2_score", + "f0.5_score", + # curves + "roc_curve", + "precision_recall_curve", + "det_curve", +} + +# Metric undefined with "binary" or "multiclass" input +METRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union( + METRIC_UNDEFINED_MULTICLASS +) + +# Metrics with an "average" argument +METRICS_WITH_AVERAGING = { + "precision_score", + "recall_score", + "f1_score", + "f2_score", + "f0.5_score", + "jaccard_score", +} + +# Threshold-based metrics with an "average" argument +THRESHOLDED_METRICS_WITH_AVERAGING = { + "roc_auc_score", + "average_precision_score", + "partial_roc_auc", +} + +# Metrics with a "pos_label" argument +METRICS_WITH_POS_LABEL = { + "roc_curve", + "precision_recall_curve", + "det_curve", + "brier_score_loss", + "precision_score", + "recall_score", + "f1_score", + "f2_score", + "f0.5_score", + "jaccard_score", + "average_precision_score", + "weighted_average_precision_score", + "micro_average_precision_score", + "samples_average_precision_score", +} + +# Metrics with a "labels" argument +# TODO: Handle multi_class metrics that has a labels argument as well as a +# decision function argument. 
e.g hinge_loss +METRICS_WITH_LABELS = { + "unnormalized_confusion_matrix", + "normalized_confusion_matrix", + "roc_curve", + "precision_recall_curve", + "det_curve", + "precision_score", + "recall_score", + "f1_score", + "f2_score", + "f0.5_score", + "jaccard_score", + "weighted_f0.5_score", + "weighted_f1_score", + "weighted_f2_score", + "weighted_precision_score", + "weighted_recall_score", + "weighted_jaccard_score", + "micro_f0.5_score", + "micro_f1_score", + "micro_f2_score", + "micro_precision_score", + "micro_recall_score", + "micro_jaccard_score", + "macro_f0.5_score", + "macro_f1_score", + "macro_f2_score", + "macro_precision_score", + "macro_recall_score", + "macro_jaccard_score", + "unnormalized_multilabel_confusion_matrix", + "unnormalized_multilabel_confusion_matrix_sample", + "cohen_kappa_score", +} + +# Metrics with a "normalize" option +METRICS_WITH_NORMALIZE_OPTION = { + "accuracy_score", + "top_k_accuracy_score", + "zero_one_loss", +} + +# Threshold-based metrics with "multilabel-indicator" format support +THRESHOLDED_MULTILABEL_METRICS = { + "log_loss", + "unnormalized_log_loss", + "roc_auc_score", + "weighted_roc_auc", + "samples_roc_auc", + "micro_roc_auc", + "partial_roc_auc", + "average_precision_score", + "weighted_average_precision_score", + "samples_average_precision_score", + "micro_average_precision_score", + "coverage_error", + "label_ranking_loss", + "ndcg_score", + "dcg_score", + "label_ranking_average_precision_score", +} + +# Classification metrics with "multilabel-indicator" format +MULTILABELS_METRICS = { + "accuracy_score", + "unnormalized_accuracy_score", + "hamming_loss", + "zero_one_loss", + "unnormalized_zero_one_loss", + "weighted_f0.5_score", + "weighted_f1_score", + "weighted_f2_score", + "weighted_precision_score", + "weighted_recall_score", + "weighted_jaccard_score", + "macro_f0.5_score", + "macro_f1_score", + "macro_f2_score", + "macro_precision_score", + "macro_recall_score", + "macro_jaccard_score", + "micro_f0.5_score", + "micro_f1_score", + "micro_f2_score", + "micro_precision_score", + "micro_recall_score", + "micro_jaccard_score", + "unnormalized_multilabel_confusion_matrix", + "samples_f0.5_score", + "samples_f1_score", + "samples_f2_score", + "samples_precision_score", + "samples_recall_score", + "samples_jaccard_score", +} + +# Regression metrics with "multioutput-continuous" format support +MULTIOUTPUT_METRICS = { + "mean_absolute_error", + "median_absolute_error", + "mean_squared_error", + "r2_score", + "explained_variance_score", + "mean_absolute_percentage_error", + "mean_pinball_loss", + "d2_pinball_score", + "d2_absolute_error_score", +} + +# Symmetric with respect to their input arguments y_true and y_pred +# metric(y_true, y_pred) == metric(y_pred, y_true). +SYMMETRIC_METRICS = { + "accuracy_score", + "unnormalized_accuracy_score", + "hamming_loss", + "zero_one_loss", + "unnormalized_zero_one_loss", + "micro_jaccard_score", + "macro_jaccard_score", + "jaccard_score", + "samples_jaccard_score", + "f1_score", + "micro_f1_score", + "macro_f1_score", + "weighted_recall_score", + # P = R = F = accuracy in multiclass case + "micro_f0.5_score", + "micro_f1_score", + "micro_f2_score", + "micro_precision_score", + "micro_recall_score", + "matthews_corrcoef_score", + "mean_absolute_error", + "mean_squared_error", + "median_absolute_error", + "max_error", + # Pinball loss is only symmetric for alpha=0.5 which is the default. 
+ "mean_pinball_loss", + "cohen_kappa_score", + "mean_normal_deviance", +} + +# Asymmetric with respect to their input arguments y_true and y_pred +# metric(y_true, y_pred) != metric(y_pred, y_true). +NOT_SYMMETRIC_METRICS = { + "balanced_accuracy_score", + "adjusted_balanced_accuracy_score", + "explained_variance_score", + "r2_score", + "unnormalized_confusion_matrix", + "normalized_confusion_matrix", + "roc_curve", + "precision_recall_curve", + "det_curve", + "precision_score", + "recall_score", + "f2_score", + "f0.5_score", + "weighted_f0.5_score", + "weighted_f1_score", + "weighted_f2_score", + "weighted_precision_score", + "weighted_jaccard_score", + "unnormalized_multilabel_confusion_matrix", + "macro_f0.5_score", + "macro_f2_score", + "macro_precision_score", + "macro_recall_score", + "hinge_loss", + "mean_gamma_deviance", + "mean_poisson_deviance", + "mean_compound_poisson_deviance", + "d2_tweedie_score", + "d2_pinball_score", + "d2_absolute_error_score", + "mean_absolute_percentage_error", +} + + +# No Sample weight support +METRICS_WITHOUT_SAMPLE_WEIGHT = { + "median_absolute_error", + "max_error", + "ovo_roc_auc", + "weighted_ovo_roc_auc", +} + +METRICS_REQUIRE_POSITIVE_Y = { + "mean_poisson_deviance", + "mean_gamma_deviance", + "mean_compound_poisson_deviance", + "d2_tweedie_score", +} + + +def _require_positive_targets(y1, y2): + """Make targets strictly positive""" + offset = abs(min(y1.min(), y2.min())) + 1 + y1 += offset + y2 += offset + return y1, y2 + + +def test_symmetry_consistency(): + # We shouldn't forget any metrics + assert ( + SYMMETRIC_METRICS + | NOT_SYMMETRIC_METRICS + | set(THRESHOLDED_METRICS) + | METRIC_UNDEFINED_BINARY_MULTICLASS + ) == set(ALL_METRICS) + + assert (SYMMETRIC_METRICS & NOT_SYMMETRIC_METRICS) == set() + + +@pytest.mark.parametrize("name", sorted(SYMMETRIC_METRICS)) +def test_symmetric_metric(name): + # Test the symmetry of score and loss functions + random_state = check_random_state(0) + y_true = random_state.randint(0, 2, size=(20,)) + y_pred = random_state.randint(0, 2, size=(20,)) + + if name in METRICS_REQUIRE_POSITIVE_Y: + y_true, y_pred = _require_positive_targets(y_true, y_pred) + + y_true_bin = random_state.randint(0, 2, size=(20, 25)) + y_pred_bin = random_state.randint(0, 2, size=(20, 25)) + + metric = ALL_METRICS[name] + if name in METRIC_UNDEFINED_BINARY: + if name in MULTILABELS_METRICS: + assert_allclose( + metric(y_true_bin, y_pred_bin), + metric(y_pred_bin, y_true_bin), + err_msg="%s is not symmetric" % name, + ) + else: + assert False, "This case is currently unhandled" + else: + assert_allclose( + metric(y_true, y_pred), + metric(y_pred, y_true), + err_msg="%s is not symmetric" % name, + ) + + +@pytest.mark.parametrize("name", sorted(NOT_SYMMETRIC_METRICS)) +def test_not_symmetric_metric(name): + # Test the symmetry of score and loss functions + random_state = check_random_state(0) + y_true = random_state.randint(0, 2, size=(20,)) + y_pred = random_state.randint(0, 2, size=(20,)) + + if name in METRICS_REQUIRE_POSITIVE_Y: + y_true, y_pred = _require_positive_targets(y_true, y_pred) + + metric = ALL_METRICS[name] + + # use context manager to supply custom error message + with pytest.raises(AssertionError): + assert_array_equal(metric(y_true, y_pred), metric(y_pred, y_true)) + raise ValueError("%s seems to be symmetric" % name) + + +@pytest.mark.parametrize( + "name", sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS) +) +def test_sample_order_invariance(name): + random_state = check_random_state(0) + y_true = 
random_state.randint(0, 2, size=(20,)) + y_pred = random_state.randint(0, 2, size=(20,)) + + if name in METRICS_REQUIRE_POSITIVE_Y: + y_true, y_pred = _require_positive_targets(y_true, y_pred) + + y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0) + + with ignore_warnings(): + metric = ALL_METRICS[name] + assert_allclose( + metric(y_true, y_pred), + metric(y_true_shuffle, y_pred_shuffle), + err_msg="%s is not sample order invariant" % name, + ) + + +@ignore_warnings +def test_sample_order_invariance_multilabel_and_multioutput(): + random_state = check_random_state(0) + + # Generate some data + y_true = random_state.randint(0, 2, size=(20, 25)) + y_pred = random_state.randint(0, 2, size=(20, 25)) + y_score = random_state.normal(size=y_true.shape) + + y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle( + y_true, y_pred, y_score, random_state=0 + ) + + for name in MULTILABELS_METRICS: + metric = ALL_METRICS[name] + assert_allclose( + metric(y_true, y_pred), + metric(y_true_shuffle, y_pred_shuffle), + err_msg="%s is not sample order invariant" % name, + ) + + for name in THRESHOLDED_MULTILABEL_METRICS: + metric = ALL_METRICS[name] + assert_allclose( + metric(y_true, y_score), + metric(y_true_shuffle, y_score_shuffle), + err_msg="%s is not sample order invariant" % name, + ) + + for name in MULTIOUTPUT_METRICS: + metric = ALL_METRICS[name] + assert_allclose( + metric(y_true, y_score), + metric(y_true_shuffle, y_score_shuffle), + err_msg="%s is not sample order invariant" % name, + ) + assert_allclose( + metric(y_true, y_pred), + metric(y_true_shuffle, y_pred_shuffle), + err_msg="%s is not sample order invariant" % name, + ) + + +@pytest.mark.parametrize( + "name", sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS) +) +def test_format_invariance_with_1d_vectors(name): + random_state = check_random_state(0) + y1 = random_state.randint(0, 2, size=(20,)) + y2 = random_state.randint(0, 2, size=(20,)) + + if name in METRICS_REQUIRE_POSITIVE_Y: + y1, y2 = _require_positive_targets(y1, y2) + + y1_list = list(y1) + y2_list = list(y2) + + y1_1d, y2_1d = np.array(y1), np.array(y2) + assert_array_equal(y1_1d.ndim, 1) + assert_array_equal(y2_1d.ndim, 1) + y1_column = np.reshape(y1_1d, (-1, 1)) + y2_column = np.reshape(y2_1d, (-1, 1)) + y1_row = np.reshape(y1_1d, (1, -1)) + y2_row = np.reshape(y2_1d, (1, -1)) + + with ignore_warnings(): + metric = ALL_METRICS[name] + + measure = metric(y1, y2) + + assert_allclose( + metric(y1_list, y2_list), + measure, + err_msg="%s is not representation invariant with list" % name, + ) + + assert_allclose( + metric(y1_1d, y2_1d), + measure, + err_msg="%s is not representation invariant with np-array-1d" % name, + ) + + assert_allclose( + metric(y1_column, y2_column), + measure, + err_msg="%s is not representation invariant with np-array-column" % name, + ) + + # Mix format support + assert_allclose( + metric(y1_1d, y2_list), + measure, + err_msg="%s is not representation invariant with mix np-array-1d and list" + % name, + ) + + assert_allclose( + metric(y1_list, y2_1d), + measure, + err_msg="%s is not representation invariant with mix np-array-1d and list" + % name, + ) + + assert_allclose( + metric(y1_1d, y2_column), + measure, + err_msg=( + "%s is not representation invariant with mix " + "np-array-1d and np-array-column" + ) + % name, + ) + + assert_allclose( + metric(y1_column, y2_1d), + measure, + err_msg=( + "%s is not representation invariant with mix " + "np-array-1d and np-array-column" + ) + % name, + ) + + assert_allclose( 
+ metric(y1_list, y2_column), + measure, + err_msg=( + "%s is not representation invariant with mix list and np-array-column" + ) + % name, + ) + + assert_allclose( + metric(y1_column, y2_list), + measure, + err_msg=( + "%s is not representation invariant with mix list and np-array-column" + ) + % name, + ) + + # These mix representations aren't allowed + with pytest.raises(ValueError): + metric(y1_1d, y2_row) + with pytest.raises(ValueError): + metric(y1_row, y2_1d) + with pytest.raises(ValueError): + metric(y1_list, y2_row) + with pytest.raises(ValueError): + metric(y1_row, y2_list) + with pytest.raises(ValueError): + metric(y1_column, y2_row) + with pytest.raises(ValueError): + metric(y1_row, y2_column) + + # NB: We do not test for y1_row, y2_row as these may be + # interpreted as multilabel or multioutput data. + if name not in ( + MULTIOUTPUT_METRICS | THRESHOLDED_MULTILABEL_METRICS | MULTILABELS_METRICS + ): + with pytest.raises(ValueError): + metric(y1_row, y2_row) + + +@pytest.mark.parametrize( + "name", sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS) +) +def test_classification_invariance_string_vs_numbers_labels(name): + # Ensure that classification metrics with string labels are invariant + random_state = check_random_state(0) + y1 = random_state.randint(0, 2, size=(20,)) + y2 = random_state.randint(0, 2, size=(20,)) + + y1_str = np.array(["eggs", "spam"])[y1] + y2_str = np.array(["eggs", "spam"])[y2] + + pos_label_str = "spam" + labels_str = ["eggs", "spam"] + + with ignore_warnings(): + metric = CLASSIFICATION_METRICS[name] + measure_with_number = metric(y1, y2) + + # Ugly, but handle case with a pos_label and label + metric_str = metric + if name in METRICS_WITH_POS_LABEL: + metric_str = partial(metric_str, pos_label=pos_label_str) + + measure_with_str = metric_str(y1_str, y2_str) + + assert_array_equal( + measure_with_number, + measure_with_str, + err_msg="{0} failed string vs number invariance test".format(name), + ) + + measure_with_strobj = metric_str(y1_str.astype("O"), y2_str.astype("O")) + assert_array_equal( + measure_with_number, + measure_with_strobj, + err_msg="{0} failed string object vs number invariance test".format(name), + ) + + if name in METRICS_WITH_LABELS: + metric_str = partial(metric_str, labels=labels_str) + measure_with_str = metric_str(y1_str, y2_str) + assert_array_equal( + measure_with_number, + measure_with_str, + err_msg="{0} failed string vs number invariance test".format(name), + ) + + measure_with_strobj = metric_str(y1_str.astype("O"), y2_str.astype("O")) + assert_array_equal( + measure_with_number, + measure_with_strobj, + err_msg="{0} failed string vs number invariance test".format(name), + ) + + +@pytest.mark.parametrize("name", THRESHOLDED_METRICS) +def test_thresholded_invariance_string_vs_numbers_labels(name): + # Ensure that thresholded metrics with string labels are invariant + random_state = check_random_state(0) + y1 = random_state.randint(0, 2, size=(20,)) + y2 = random_state.randint(0, 2, size=(20,)) + + y1_str = np.array(["eggs", "spam"])[y1] + + pos_label_str = "spam" + + with ignore_warnings(): + metric = THRESHOLDED_METRICS[name] + if name not in METRIC_UNDEFINED_BINARY: + # Ugly, but handle case with a pos_label and label + metric_str = metric + if name in METRICS_WITH_POS_LABEL: + metric_str = partial(metric_str, pos_label=pos_label_str) + + measure_with_number = metric(y1, y2) + measure_with_str = metric_str(y1_str, y2) + assert_array_equal( + measure_with_number, + measure_with_str, + err_msg="{0} 
failed string vs number invariance test".format(name), + ) + + measure_with_strobj = metric_str(y1_str.astype("O"), y2) + assert_array_equal( + measure_with_number, + measure_with_strobj, + err_msg="{0} failed string object vs number invariance test".format( + name + ), + ) + else: + # TODO those metrics doesn't support string label yet + with pytest.raises(ValueError): + metric(y1_str, y2) + with pytest.raises(ValueError): + metric(y1_str.astype("O"), y2) + + +invalids_nan_inf = [ + ([0, 1], [np.inf, np.inf]), + ([0, 1], [np.nan, np.nan]), + ([0, 1], [np.nan, np.inf]), + ([0, 1], [np.inf, 1]), + ([0, 1], [np.nan, 1]), +] + + +@pytest.mark.parametrize( + "metric", chain(THRESHOLDED_METRICS.values(), REGRESSION_METRICS.values()) +) +@pytest.mark.parametrize("y_true, y_score", invalids_nan_inf) +def test_regression_thresholded_inf_nan_input(metric, y_true, y_score): + # Reshape since coverage_error only accepts 2D arrays. + if metric == coverage_error: + y_true = [y_true] + y_score = [y_score] + with pytest.raises(ValueError, match=r"contains (NaN|infinity)"): + metric(y_true, y_score) + + +@pytest.mark.parametrize("metric", CLASSIFICATION_METRICS.values()) +@pytest.mark.parametrize( + "y_true, y_score", + invalids_nan_inf + + # Add an additional case for classification only + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/6809 + [ + ([np.nan, 1, 2], [1, 2, 3]), + ([np.inf, 1, 2], [1, 2, 3]), + ], # type: ignore +) +def test_classification_inf_nan_input(metric, y_true, y_score): + """check that classification metrics raise a message mentioning the + occurrence of non-finite values in the target vectors.""" + if not np.isfinite(y_true).all(): + input_name = "y_true" + if np.isnan(y_true).any(): + unexpected_value = "NaN" + else: + unexpected_value = "infinity or a value too large" + else: + input_name = "y_pred" + if np.isnan(y_score).any(): + unexpected_value = "NaN" + else: + unexpected_value = "infinity or a value too large" + + err_msg = f"Input {input_name} contains {unexpected_value}" + + with pytest.raises(ValueError, match=err_msg): + metric(y_true, y_score) + + +@pytest.mark.parametrize("metric", CLASSIFICATION_METRICS.values()) +def test_classification_binary_continuous_input(metric): + """check that classification metrics raise a message of mixed type data + with continuous/binary target vectors.""" + y_true, y_score = ["a", "b", "a"], [0.1, 0.2, 0.3] + err_msg = ( + "Classification metrics can't handle a mix of binary and continuous targets" + ) + with pytest.raises(ValueError, match=err_msg): + metric(y_true, y_score) + + +@ignore_warnings +def check_single_sample(name): + # Non-regression test: scores should work with a single sample. + # This is important for leave-one-out cross validation. + # Score functions tested are those that formerly called np.squeeze, + # which turns an array of size 1 into a 0-d array (!). 
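A quick illustration of the squeeze pitfall mentioned above, and of scoring a single sample (assuming a scikit-learn install):

import numpy as np
from sklearn.metrics import accuracy_score

print(np.squeeze(np.array([1])).ndim)  # 0 -- squeezing a length-1 array gives a 0-d array
print(accuracy_score([1], [0]))        # 0.0 -- classification metrics still work on one sample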
+ metric = ALL_METRICS[name] + + # assert that no exception is thrown + if name in METRICS_REQUIRE_POSITIVE_Y: + values = [1, 2] + else: + values = [0, 1] + for i, j in product(values, repeat=2): + metric([i], [j]) + + +@ignore_warnings +def check_single_sample_multioutput(name): + metric = ALL_METRICS[name] + for i, j, k, l in product([0, 1], repeat=4): + metric(np.array([[i, j]]), np.array([[k, l]])) + + +@pytest.mark.parametrize( + "name", + sorted( + set(ALL_METRICS) + # Those metrics are not always defined with one sample + # or in multiclass classification + - METRIC_UNDEFINED_BINARY_MULTICLASS + - set(THRESHOLDED_METRICS) + ), +) +def test_single_sample(name): + check_single_sample(name) + + +@pytest.mark.parametrize("name", sorted(MULTIOUTPUT_METRICS | MULTILABELS_METRICS)) +def test_single_sample_multioutput(name): + check_single_sample_multioutput(name) + + +@pytest.mark.parametrize("name", sorted(MULTIOUTPUT_METRICS)) +def test_multioutput_number_of_output_differ(name): + y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) + y_pred = np.array([[0, 0], [1, 0], [0, 0]]) + + metric = ALL_METRICS[name] + with pytest.raises(ValueError): + metric(y_true, y_pred) + + +@pytest.mark.parametrize("name", sorted(MULTIOUTPUT_METRICS)) +def test_multioutput_regression_invariance_to_dimension_shuffling(name): + # test invariance to dimension shuffling + random_state = check_random_state(0) + y_true = random_state.uniform(0, 2, size=(20, 5)) + y_pred = random_state.uniform(0, 2, size=(20, 5)) + + metric = ALL_METRICS[name] + error = metric(y_true, y_pred) + + for _ in range(3): + perm = random_state.permutation(y_true.shape[1]) + assert_allclose( + metric(y_true[:, perm], y_pred[:, perm]), + error, + err_msg="%s is not dimension shuffling invariant" % (name), + ) + + +@ignore_warnings +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_multilabel_representation_invariance(coo_container): + # Generate some data + n_classes = 4 + n_samples = 50 + + _, y1 = make_multilabel_classification( + n_features=1, + n_classes=n_classes, + random_state=0, + n_samples=n_samples, + allow_unlabeled=True, + ) + _, y2 = make_multilabel_classification( + n_features=1, + n_classes=n_classes, + random_state=1, + n_samples=n_samples, + allow_unlabeled=True, + ) + + # To make sure at least one empty label is present + y1 = np.vstack([y1, [[0] * n_classes]]) + y2 = np.vstack([y2, [[0] * n_classes]]) + + y1_sparse_indicator = coo_container(y1) + y2_sparse_indicator = coo_container(y2) + + y1_list_array_indicator = list(y1) + y2_list_array_indicator = list(y2) + + y1_list_list_indicator = [list(a) for a in y1_list_array_indicator] + y2_list_list_indicator = [list(a) for a in y2_list_array_indicator] + + for name in MULTILABELS_METRICS: + metric = ALL_METRICS[name] + + # XXX cruel hack to work with partial functions + if isinstance(metric, partial): + metric.__module__ = "tmp" + metric.__name__ = name + + measure = metric(y1, y2) + + # Check representation invariance + assert_allclose( + metric(y1_sparse_indicator, y2_sparse_indicator), + measure, + err_msg=( + "%s failed representation invariance between " + "dense and sparse indicator formats." + ) + % name, + ) + assert_almost_equal( + metric(y1_list_list_indicator, y2_list_list_indicator), + measure, + err_msg=( + "%s failed representation invariance " + "between dense array and list of list " + "indicator formats." 
+ ) + % name, + ) + assert_almost_equal( + metric(y1_list_array_indicator, y2_list_array_indicator), + measure, + err_msg=( + "%s failed representation invariance " + "between dense and list of array " + "indicator formats." + ) + % name, + ) + + +@pytest.mark.parametrize("name", sorted(MULTILABELS_METRICS)) +def test_raise_value_error_multilabel_sequences(name): + # make sure the multilabel-sequence format raises ValueError + multilabel_sequences = [ + [[1], [2], [0, 1]], + [(), (2), (0, 1)], + [[]], + [()], + np.array([[], [1, 2]], dtype="object"), + ] + + metric = ALL_METRICS[name] + for seq in multilabel_sequences: + with pytest.raises(ValueError): + metric(seq, seq) + + +@pytest.mark.parametrize("name", sorted(METRICS_WITH_NORMALIZE_OPTION)) +def test_normalize_option_binary_classification(name): + # Test in the binary case + n_classes = 2 + n_samples = 20 + random_state = check_random_state(0) + + y_true = random_state.randint(0, n_classes, size=(n_samples,)) + y_pred = random_state.randint(0, n_classes, size=(n_samples,)) + y_score = random_state.normal(size=y_true.shape) + + metrics = ALL_METRICS[name] + pred = y_score if name in THRESHOLDED_METRICS else y_pred + measure_normalized = metrics(y_true, pred, normalize=True) + measure_not_normalized = metrics(y_true, pred, normalize=False) + + assert_array_less( + -1.0 * measure_normalized, + 0, + err_msg="We failed to test correctly the normalize option", + ) + + assert_allclose( + measure_normalized, + measure_not_normalized / n_samples, + err_msg=f"Failed with {name}", + ) + + +@pytest.mark.parametrize("name", sorted(METRICS_WITH_NORMALIZE_OPTION)) +def test_normalize_option_multiclass_classification(name): + # Test in the multiclass case + n_classes = 4 + n_samples = 20 + random_state = check_random_state(0) + + y_true = random_state.randint(0, n_classes, size=(n_samples,)) + y_pred = random_state.randint(0, n_classes, size=(n_samples,)) + y_score = random_state.uniform(size=(n_samples, n_classes)) + + metrics = ALL_METRICS[name] + pred = y_score if name in THRESHOLDED_METRICS else y_pred + measure_normalized = metrics(y_true, pred, normalize=True) + measure_not_normalized = metrics(y_true, pred, normalize=False) + + assert_array_less( + -1.0 * measure_normalized, + 0, + err_msg="We failed to test correctly the normalize option", + ) + + assert_allclose( + measure_normalized, + measure_not_normalized / n_samples, + err_msg=f"Failed with {name}", + ) + + +@pytest.mark.parametrize( + "name", sorted(METRICS_WITH_NORMALIZE_OPTION.intersection(MULTILABELS_METRICS)) +) +def test_normalize_option_multilabel_classification(name): + # Test in the multilabel case + n_classes = 4 + n_samples = 100 + random_state = check_random_state(0) + + # for both random_state 0 and 1, y_true and y_pred has at least one + # unlabelled entry + _, y_true = make_multilabel_classification( + n_features=1, + n_classes=n_classes, + random_state=0, + allow_unlabeled=True, + n_samples=n_samples, + ) + _, y_pred = make_multilabel_classification( + n_features=1, + n_classes=n_classes, + random_state=1, + allow_unlabeled=True, + n_samples=n_samples, + ) + + y_score = random_state.uniform(size=y_true.shape) + + # To make sure at least one empty label is present + y_true += [0] * n_classes + y_pred += [0] * n_classes + + metrics = ALL_METRICS[name] + pred = y_score if name in THRESHOLDED_METRICS else y_pred + measure_normalized = metrics(y_true, pred, normalize=True) + measure_not_normalized = metrics(y_true, pred, normalize=False) + + assert_array_less( + -1.0 * 
measure_normalized, + 0, + err_msg="We failed to test correctly the normalize option", + ) + + assert_allclose( + measure_normalized, + measure_not_normalized / n_samples, + err_msg=f"Failed with {name}", + ) + + +@ignore_warnings +def _check_averaging( + metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel +): + n_samples, n_classes = y_true_binarize.shape + + # No averaging + label_measure = metric(y_true, y_pred, average=None) + assert_allclose( + label_measure, + [ + metric(y_true_binarize[:, i], y_pred_binarize[:, i]) + for i in range(n_classes) + ], + ) + + # Micro measure + micro_measure = metric(y_true, y_pred, average="micro") + assert_allclose( + micro_measure, metric(y_true_binarize.ravel(), y_pred_binarize.ravel()) + ) + + # Macro measure + macro_measure = metric(y_true, y_pred, average="macro") + assert_allclose(macro_measure, np.mean(label_measure)) + + # Weighted measure + weights = np.sum(y_true_binarize, axis=0, dtype=int) + + if np.sum(weights) != 0: + weighted_measure = metric(y_true, y_pred, average="weighted") + assert_allclose(weighted_measure, np.average(label_measure, weights=weights)) + else: + weighted_measure = metric(y_true, y_pred, average="weighted") + assert_allclose(weighted_measure, 0) + + # Sample measure + if is_multilabel: + sample_measure = metric(y_true, y_pred, average="samples") + assert_allclose( + sample_measure, + np.mean( + [ + metric(y_true_binarize[i], y_pred_binarize[i]) + for i in range(n_samples) + ] + ), + ) + + with pytest.raises(ValueError): + metric(y_true, y_pred, average="unknown") + with pytest.raises(ValueError): + metric(y_true, y_pred, average="garbage") + + +def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score): + is_multilabel = type_of_target(y_true).startswith("multilabel") + + metric = ALL_METRICS[name] + + if name in METRICS_WITH_AVERAGING: + _check_averaging( + metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel + ) + elif name in THRESHOLDED_METRICS_WITH_AVERAGING: + _check_averaging( + metric, y_true, y_score, y_true_binarize, y_score, is_multilabel + ) + else: + raise ValueError("Metric is not recorded as having an average option") + + +@pytest.mark.parametrize("name", sorted(METRICS_WITH_AVERAGING)) +def test_averaging_multiclass(name): + n_samples, n_classes = 50, 3 + random_state = check_random_state(0) + y_true = random_state.randint(0, n_classes, size=(n_samples,)) + y_pred = random_state.randint(0, n_classes, size=(n_samples,)) + y_score = random_state.uniform(size=(n_samples, n_classes)) + + lb = LabelBinarizer().fit(y_true) + y_true_binarize = lb.transform(y_true) + y_pred_binarize = lb.transform(y_pred) + + check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) + + +@pytest.mark.parametrize( + "name", sorted(METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING) +) +def test_averaging_multilabel(name): + n_samples, n_classes = 40, 5 + _, y = make_multilabel_classification( + n_features=1, + n_classes=n_classes, + random_state=5, + n_samples=n_samples, + allow_unlabeled=False, + ) + y_true = y[:20] + y_pred = y[20:] + y_score = check_random_state(0).normal(size=(20, n_classes)) + y_true_binarize = y_true + y_pred_binarize = y_pred + + check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) + + +@pytest.mark.parametrize("name", sorted(METRICS_WITH_AVERAGING)) +def test_averaging_multilabel_all_zeroes(name): + y_true = np.zeros((20, 3)) + y_pred = np.zeros((20, 3)) + y_score = np.zeros((20, 
3)) + y_true_binarize = y_true + y_pred_binarize = y_pred + + check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) + + +def test_averaging_binary_multilabel_all_zeroes(): + y_true = np.zeros((20, 3)) + y_pred = np.zeros((20, 3)) + y_true_binarize = y_true + y_pred_binarize = y_pred + # Test _average_binary_score for weight.sum() == 0 + binary_metric = lambda y_true, y_score, average="macro": _average_binary_score( + precision_score, y_true, y_score, average + ) + _check_averaging( + binary_metric, + y_true, + y_pred, + y_true_binarize, + y_pred_binarize, + is_multilabel=True, + ) + + +@pytest.mark.parametrize("name", sorted(METRICS_WITH_AVERAGING)) +def test_averaging_multilabel_all_ones(name): + y_true = np.ones((20, 3)) + y_pred = np.ones((20, 3)) + y_score = np.ones((20, 3)) + y_true_binarize = y_true + y_pred_binarize = y_pred + + check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) + + +@ignore_warnings +def check_sample_weight_invariance(name, metric, y1, y2): + rng = np.random.RandomState(0) + sample_weight = rng.randint(1, 10, size=len(y1)) + + # top_k_accuracy_score always lead to a perfect score for k > 1 in the + # binary case + metric = partial(metric, k=1) if name == "top_k_accuracy_score" else metric + + # check that unit weights gives the same score as no weight + unweighted_score = metric(y1, y2, sample_weight=None) + + assert_allclose( + unweighted_score, + metric(y1, y2, sample_weight=np.ones(shape=len(y1))), + err_msg="For %s sample_weight=None is not equivalent to sample_weight=ones" + % name, + ) + + # check that the weighted and unweighted scores are unequal + weighted_score = metric(y1, y2, sample_weight=sample_weight) + + # use context manager to supply custom error message + with pytest.raises(AssertionError): + assert_allclose(unweighted_score, weighted_score) + raise ValueError( + "Unweighted and weighted scores are unexpectedly " + "almost equal (%s) and (%s) " + "for %s" % (unweighted_score, weighted_score, name) + ) + + # check that sample_weight can be a list + weighted_score_list = metric(y1, y2, sample_weight=sample_weight.tolist()) + assert_allclose( + weighted_score, + weighted_score_list, + err_msg=( + "Weighted scores for array and list " + "sample_weight input are not equal (%s != %s) for %s" + ) + % (weighted_score, weighted_score_list, name), + ) + + # check that integer weights is the same as repeated samples + repeat_weighted_score = metric( + np.repeat(y1, sample_weight, axis=0), + np.repeat(y2, sample_weight, axis=0), + sample_weight=None, + ) + assert_allclose( + weighted_score, + repeat_weighted_score, + err_msg="Weighting %s is not equal to repeating samples" % name, + ) + + # check that ignoring a fraction of the samples is equivalent to setting + # the corresponding weights to zero + sample_weight_subset = sample_weight[1::2] + sample_weight_zeroed = np.copy(sample_weight) + sample_weight_zeroed[::2] = 0 + y1_subset = y1[1::2] + y2_subset = y2[1::2] + weighted_score_subset = metric( + y1_subset, y2_subset, sample_weight=sample_weight_subset + ) + weighted_score_zeroed = metric(y1, y2, sample_weight=sample_weight_zeroed) + assert_allclose( + weighted_score_subset, + weighted_score_zeroed, + err_msg=( + "Zeroing weights does not give the same result as " + "removing the corresponding samples (%s != %s) for %s" + ) + % (weighted_score_zeroed, weighted_score_subset, name), + ) + + if not name.startswith("unnormalized"): + # check that the score is invariant under scaling of the weights by a 
+ # common factor + for scaling in [2, 0.3]: + assert_allclose( + weighted_score, + metric(y1, y2, sample_weight=sample_weight * scaling), + err_msg="%s sample_weight is not invariant under scaling" % name, + ) + + # Check that if number of samples in y_true and sample_weight are not + # equal, meaningful error is raised. + error_message = ( + r"Found input variables with inconsistent numbers of " + r"samples: \[{}, {}, {}\]".format( + _num_samples(y1), _num_samples(y2), _num_samples(sample_weight) * 2 + ) + ) + with pytest.raises(ValueError, match=error_message): + metric(y1, y2, sample_weight=np.hstack([sample_weight, sample_weight])) + + +@pytest.mark.parametrize( + "name", + sorted( + set(ALL_METRICS).intersection(set(REGRESSION_METRICS)) + - METRICS_WITHOUT_SAMPLE_WEIGHT + ), +) +def test_regression_sample_weight_invariance(name): + n_samples = 50 + random_state = check_random_state(0) + # regression + y_true = random_state.random_sample(size=(n_samples,)) + y_pred = random_state.random_sample(size=(n_samples,)) + metric = ALL_METRICS[name] + check_sample_weight_invariance(name, metric, y_true, y_pred) + + +@pytest.mark.parametrize( + "name", + sorted( + set(ALL_METRICS) + - set(REGRESSION_METRICS) + - METRICS_WITHOUT_SAMPLE_WEIGHT + - METRIC_UNDEFINED_BINARY + ), +) +def test_binary_sample_weight_invariance(name): + # binary + n_samples = 50 + random_state = check_random_state(0) + y_true = random_state.randint(0, 2, size=(n_samples,)) + y_pred = random_state.randint(0, 2, size=(n_samples,)) + y_score = random_state.random_sample(size=(n_samples,)) + metric = ALL_METRICS[name] + if name in THRESHOLDED_METRICS: + check_sample_weight_invariance(name, metric, y_true, y_score) + else: + check_sample_weight_invariance(name, metric, y_true, y_pred) + + +@pytest.mark.parametrize( + "name", + sorted( + set(ALL_METRICS) + - set(REGRESSION_METRICS) + - METRICS_WITHOUT_SAMPLE_WEIGHT + - METRIC_UNDEFINED_BINARY_MULTICLASS + ), +) +def test_multiclass_sample_weight_invariance(name): + # multiclass + n_samples = 50 + random_state = check_random_state(0) + y_true = random_state.randint(0, 5, size=(n_samples,)) + y_pred = random_state.randint(0, 5, size=(n_samples,)) + y_score = random_state.random_sample(size=(n_samples, 5)) + metric = ALL_METRICS[name] + if name in THRESHOLDED_METRICS: + # softmax + temp = np.exp(-y_score) + y_score_norm = temp / temp.sum(axis=-1).reshape(-1, 1) + check_sample_weight_invariance(name, metric, y_true, y_score_norm) + else: + check_sample_weight_invariance(name, metric, y_true, y_pred) + + +@pytest.mark.parametrize( + "name", + sorted( + (MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS) + - METRICS_WITHOUT_SAMPLE_WEIGHT + ), +) +def test_multilabel_sample_weight_invariance(name): + # multilabel indicator + random_state = check_random_state(0) + _, ya = make_multilabel_classification( + n_features=1, n_classes=10, random_state=0, n_samples=50, allow_unlabeled=False + ) + _, yb = make_multilabel_classification( + n_features=1, n_classes=10, random_state=1, n_samples=50, allow_unlabeled=False + ) + y_true = np.vstack([ya, yb]) + y_pred = np.vstack([ya, ya]) + y_score = random_state.randint(1, 4, size=y_true.shape) + + metric = ALL_METRICS[name] + if name in THRESHOLDED_METRICS: + check_sample_weight_invariance(name, metric, y_true, y_score) + else: + check_sample_weight_invariance(name, metric, y_true, y_pred) + + +@ignore_warnings +def test_no_averaging_labels(): + # test labels argument when not using averaging + # in multi-class and 
multi-label cases + y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]]) + y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]]) + y_true_multiclass = np.array([0, 1, 2]) + y_pred_multiclass = np.array([0, 2, 3]) + labels = np.array([3, 0, 1, 2]) + _, inverse_labels = np.unique(labels, return_inverse=True) + + for name in METRICS_WITH_AVERAGING: + for y_true, y_pred in [ + [y_true_multiclass, y_pred_multiclass], + [y_true_multilabel, y_pred_multilabel], + ]: + if name not in MULTILABELS_METRICS and y_pred.ndim > 1: + continue + + metric = ALL_METRICS[name] + + score_labels = metric(y_true, y_pred, labels=labels, average=None) + score = metric(y_true, y_pred, average=None) + assert_array_equal(score_labels, score[inverse_labels]) + + +@pytest.mark.parametrize( + "name", sorted(MULTILABELS_METRICS - {"unnormalized_multilabel_confusion_matrix"}) +) +def test_multilabel_label_permutations_invariance(name): + random_state = check_random_state(0) + n_samples, n_classes = 20, 4 + + y_true = random_state.randint(0, 2, size=(n_samples, n_classes)) + y_score = random_state.randint(0, 2, size=(n_samples, n_classes)) + + metric = ALL_METRICS[name] + score = metric(y_true, y_score) + + for perm in permutations(range(n_classes), n_classes): + y_score_perm = y_score[:, perm] + y_true_perm = y_true[:, perm] + + current_score = metric(y_true_perm, y_score_perm) + assert_almost_equal(score, current_score) + + +@pytest.mark.parametrize( + "name", sorted(THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS) +) +def test_thresholded_multilabel_multioutput_permutations_invariance(name): + random_state = check_random_state(0) + n_samples, n_classes = 20, 4 + y_true = random_state.randint(0, 2, size=(n_samples, n_classes)) + y_score = random_state.normal(size=y_true.shape) + + # Makes sure all samples have at least one label. This works around errors + # when running metrics where average="sample" + y_true[y_true.sum(1) == 4, 0] = 0 + y_true[y_true.sum(1) == 0, 0] = 1 + + metric = ALL_METRICS[name] + score = metric(y_true, y_score) + + for perm in permutations(range(n_classes), n_classes): + y_score_perm = y_score[:, perm] + y_true_perm = y_true[:, perm] + + current_score = metric(y_true_perm, y_score_perm) + if metric == mean_absolute_percentage_error: + assert np.isfinite(current_score) + assert current_score > 1e6 + # Here we are not comparing the values in case of MAPE because + # whenever y_true value is exactly zero, the MAPE value doesn't + # signify anything. Thus, in this case we are just expecting + # very large finite value. 
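As a minimal sketch of the comment above (not part of this diff; the array values are arbitrary), when `y_true` contains exact zeros, `mean_absolute_percentage_error` divides by a tiny epsilon, so the score is a very large but finite number; that is why the test only asserts finiteness and magnitude rather than comparing values.

import numpy as np
from sklearn.metrics import mean_absolute_percentage_error

y_true = np.array([0.0, 1.0, 2.0])   # contains an exact zero
y_pred = np.array([1.0, 1.0, 2.0])

score = mean_absolute_percentage_error(y_true, y_pred)
# The zero target turns one term into |0 - 1| / eps, so the mean is huge yet finite.
assert np.isfinite(score) and score > 1e6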
+ else: + assert_almost_equal(score, current_score) + + +@pytest.mark.parametrize( + "name", sorted(set(THRESHOLDED_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS) +) +def test_thresholded_metric_permutation_invariance(name): + n_samples, n_classes = 100, 3 + random_state = check_random_state(0) + + y_score = random_state.rand(n_samples, n_classes) + temp = np.exp(-y_score) + y_score = temp / temp.sum(axis=-1).reshape(-1, 1) + y_true = random_state.randint(0, n_classes, size=n_samples) + + metric = ALL_METRICS[name] + score = metric(y_true, y_score) + for perm in permutations(range(n_classes), n_classes): + inverse_perm = np.zeros(n_classes, dtype=int) + inverse_perm[list(perm)] = np.arange(n_classes) + y_score_perm = y_score[:, inverse_perm] + y_true_perm = np.take(perm, y_true) + + current_score = metric(y_true_perm, y_score_perm) + assert_almost_equal(score, current_score) + + +@pytest.mark.parametrize("metric_name", CLASSIFICATION_METRICS) +def test_metrics_consistent_type_error(metric_name): + # check that an understable message is raised when the type between y_true + # and y_pred mismatch + rng = np.random.RandomState(42) + y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=object) + y2 = rng.randint(0, 2, size=y1.size) + + err_msg = "Labels in y_true and y_pred should be of the same type." + with pytest.raises(TypeError, match=err_msg): + CLASSIFICATION_METRICS[metric_name](y1, y2) + + +@pytest.mark.parametrize( + "metric, y_pred_threshold", + [ + (average_precision_score, True), + (brier_score_loss, True), + (f1_score, False), + (partial(fbeta_score, beta=1), False), + (jaccard_score, False), + (precision_recall_curve, True), + (precision_score, False), + (recall_score, False), + (roc_curve, True), + ], +) +@pytest.mark.parametrize("dtype_y_str", [str, object]) +def test_metrics_pos_label_error_str(metric, y_pred_threshold, dtype_y_str): + # check that the error message if `pos_label` is not specified and the + # targets is made of strings. + rng = np.random.RandomState(42) + y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=dtype_y_str) + y2 = rng.randint(0, 2, size=y1.size) + + if not y_pred_threshold: + y2 = np.array(["spam", "eggs"], dtype=dtype_y_str)[y2] + + err_msg_pos_label_None = ( + "y_true takes value in {'eggs', 'spam'} and pos_label is not " + "specified: either make y_true take value in {0, 1} or {-1, 1} or " + "pass pos_label explicit" + ) + err_msg_pos_label_1 = ( + r"pos_label=1 is not a valid label. 
It should be one of " r"\['eggs', 'spam'\]" + ) + + pos_label_default = signature(metric).parameters["pos_label"].default + + err_msg = err_msg_pos_label_1 if pos_label_default == 1 else err_msg_pos_label_None + with pytest.raises(ValueError, match=err_msg): + metric(y1, y2) + + +def check_array_api_metric( + metric, array_namespace, device, dtype_name, y_true_np, y_pred_np, sample_weight +): + xp = _array_api_for_tests(array_namespace, device) + + y_true_xp = xp.asarray(y_true_np, device=device) + y_pred_xp = xp.asarray(y_pred_np, device=device) + + metric_np = metric(y_true_np, y_pred_np, sample_weight=sample_weight) + + if sample_weight is not None: + sample_weight = xp.asarray(sample_weight, device=device) + + with config_context(array_api_dispatch=True): + metric_xp = metric(y_true_xp, y_pred_xp, sample_weight=sample_weight) + + assert_allclose( + metric_xp, + metric_np, + atol=_atol_for_type(dtype_name), + ) + + +def check_array_api_binary_classification_metric( + metric, array_namespace, device, dtype_name +): + y_true_np = np.array([0, 0, 1, 1]) + y_pred_np = np.array([0, 1, 0, 1]) + + check_array_api_metric( + metric, + array_namespace, + device, + dtype_name, + y_true_np=y_true_np, + y_pred_np=y_pred_np, + sample_weight=None, + ) + + sample_weight = np.array([0.0, 0.1, 2.0, 1.0], dtype=dtype_name) + + check_array_api_metric( + metric, + array_namespace, + device, + dtype_name, + y_true_np=y_true_np, + y_pred_np=y_pred_np, + sample_weight=sample_weight, + ) + + +def check_array_api_multiclass_classification_metric( + metric, array_namespace, device, dtype_name +): + y_true_np = np.array([0, 1, 2, 3]) + y_pred_np = np.array([0, 1, 0, 2]) + + check_array_api_metric( + metric, + array_namespace, + device, + dtype_name, + y_true_np=y_true_np, + y_pred_np=y_pred_np, + sample_weight=None, + ) + + sample_weight = np.array([0.0, 0.1, 2.0, 1.0], dtype=dtype_name) + + check_array_api_metric( + metric, + array_namespace, + device, + dtype_name, + y_true_np=y_true_np, + y_pred_np=y_pred_np, + sample_weight=sample_weight, + ) + + +array_api_metric_checkers = { + accuracy_score: [ + check_array_api_binary_classification_metric, + check_array_api_multiclass_classification_metric, + ], + zero_one_loss: [ + check_array_api_binary_classification_metric, + check_array_api_multiclass_classification_metric, + ], +} + + +def yield_metric_checker_combinations(metric_checkers=array_api_metric_checkers): + for metric, checkers in metric_checkers.items(): + for checker in checkers: + yield metric, checker + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize("metric, check_func", yield_metric_checker_combinations()) +def test_array_api_compliance(metric, array_namespace, device, dtype_name, check_func): + check_func(metric, array_namespace, device, dtype_name) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_dist_metrics.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_dist_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..baaf447d3909bf72293389d938e695521e4384a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_dist_metrics.py @@ -0,0 +1,422 @@ +import copy +import itertools +import pickle + +import numpy as np +import pytest +from scipy.spatial.distance import cdist + +from sklearn.metrics import DistanceMetric +from sklearn.metrics._dist_metrics import ( + BOOL_METRICS, + DistanceMetric32, + DistanceMetric64, +) 
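For context, a small usage sketch (not part of this diff; data values are arbitrary) of the public `DistanceMetric` factory that these tests exercise:

import numpy as np
from sklearn.metrics import DistanceMetric

X = np.array([[0.0, 0.0], [3.0, 4.0]])
dist = DistanceMetric.get_metric("euclidean")
D = dist.pairwise(X)            # 2 x 2 symmetric distance matrix
assert np.isclose(D[0, 1], 5.0)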
+from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose, create_memmap_backed_data +from sklearn.utils.fixes import CSR_CONTAINERS, parse_version, sp_version + + +def dist_func(x1, x2, p): + return np.sum((x1 - x2) ** p) ** (1.0 / p) + + +rng = check_random_state(0) +d = 4 +n1 = 20 +n2 = 25 +X64 = rng.random_sample((n1, d)) +Y64 = rng.random_sample((n2, d)) +X32 = X64.astype("float32") +Y32 = Y64.astype("float32") + +[X_mmap, Y_mmap] = create_memmap_backed_data([X64, Y64]) + +# make boolean arrays: ones and zeros +X_bool = (X64 < 0.3).astype(np.float64) # quite sparse +Y_bool = (Y64 < 0.7).astype(np.float64) # not too sparse + +[X_bool_mmap, Y_bool_mmap] = create_memmap_backed_data([X_bool, Y_bool]) + + +V = rng.random_sample((d, d)) +VI = np.dot(V, V.T) + +METRICS_DEFAULT_PARAMS = [ + ("euclidean", {}), + ("cityblock", {}), + ("minkowski", dict(p=(0.5, 1, 1.5, 2, 3))), + ("chebyshev", {}), + ("seuclidean", dict(V=(rng.random_sample(d),))), + ("mahalanobis", dict(VI=(VI,))), + ("hamming", {}), + ("canberra", {}), + ("braycurtis", {}), + ("minkowski", dict(p=(0.5, 1, 1.5, 3), w=(rng.random_sample(d),))), +] + + +@pytest.mark.parametrize( + "metric_param_grid", METRICS_DEFAULT_PARAMS, ids=lambda params: params[0] +) +@pytest.mark.parametrize("X, Y", [(X64, Y64), (X32, Y32), (X_mmap, Y_mmap)]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cdist(metric_param_grid, X, Y, csr_container): + metric, param_grid = metric_param_grid + keys = param_grid.keys() + X_csr, Y_csr = csr_container(X), csr_container(Y) + for vals in itertools.product(*param_grid.values()): + kwargs = dict(zip(keys, vals)) + rtol_dict = {} + if metric == "mahalanobis" and X.dtype == np.float32: + # Computation of mahalanobis differs between + # the scipy and scikit-learn implementation. + # Hence, we increase the relative tolerance. 
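A minimal sketch of the consistency check performed by these tests, using only public APIs (data values are arbitrary): the `DistanceMetric` pairwise distances should agree with scipy's `cdist` for a shared metric and parameters.

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import DistanceMetric

rng = np.random.RandomState(0)
X, Y = rng.random_sample((5, 3)), rng.random_sample((4, 3))

D_sklearn = DistanceMetric.get_metric("minkowski", p=3).pairwise(X, Y)
D_scipy = cdist(X, Y, metric="minkowski", p=3)
np.testing.assert_allclose(D_sklearn, D_scipy)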
+ # TODO: Inspect slight numerical discrepancy + # with scipy + rtol_dict = {"rtol": 1e-6} + + # TODO: Remove when scipy minimum version >= 1.7.0 + # scipy supports 0= 1.7.0 + if metric == "minkowski": + p = kwargs["p"] + if sp_version < parse_version("1.7.0") and p < 1: + pytest.skip("scipy does not support 0= 1.7.0 + # scipy supports 0= 1.7.0 + if metric == "minkowski": + p = kwargs["p"] + if sp_version < parse_version("1.7.0") and p < 1: + pytest.skip("scipy does not support 0= parse_version("1.6.0"), + reason="wminkowski is now minkowski and it has been already tested.", + ), + ), + pytest.param( + pairwise_distances, + "wminkowski", + _wminkowski_kwds, + marks=pytest.mark.skipif( + sp_version >= parse_version("1.6.0"), + reason="wminkowski is now minkowski and it has been already tested.", + ), + ), + (pairwise_kernels, "polynomial", {"degree": 1}), + (pairwise_kernels, callable_rbf_kernel, {"gamma": 0.1}), + ], +) +@pytest.mark.parametrize("dtype", [np.float64, np.float32, int]) +def test_pairwise_parallel(func, metric, kwds, dtype): + rng = np.random.RandomState(0) + X = np.array(5 * rng.random_sample((5, 4)), dtype=dtype) + Y = np.array(5 * rng.random_sample((3, 4)), dtype=dtype) + + S = func(X, metric=metric, n_jobs=1, **kwds) + S2 = func(X, metric=metric, n_jobs=2, **kwds) + assert_allclose(S, S2) + + S = func(X, Y, metric=metric, n_jobs=1, **kwds) + S2 = func(X, Y, metric=metric, n_jobs=2, **kwds) + assert_allclose(S, S2) + + +def test_pairwise_callable_nonstrict_metric(): + # paired_distances should allow callable metric where metric(x, x) != 0 + # Knowing that the callable is a strict metric would allow the diagonal to + # be left uncalculated and set to 0. + assert pairwise_distances([[1.0]], metric=lambda x, y: 5)[0, 0] == 5 + + +# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS. +@pytest.mark.parametrize( + "metric", + ["rbf", "laplacian", "sigmoid", "polynomial", "linear", "chi2", "additive_chi2"], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pairwise_kernels(metric, csr_container): + # Test the pairwise_kernels helper function. + + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + Y = rng.random_sample((2, 4)) + function = PAIRWISE_KERNEL_FUNCTIONS[metric] + # Test with Y=None + K1 = pairwise_kernels(X, metric=metric) + K2 = function(X) + assert_allclose(K1, K2) + # Test with Y=Y + K1 = pairwise_kernels(X, Y=Y, metric=metric) + K2 = function(X, Y=Y) + assert_allclose(K1, K2) + # Test with tuples as X and Y + X_tuples = tuple([tuple([v for v in row]) for row in X]) + Y_tuples = tuple([tuple([v for v in row]) for row in Y]) + K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric) + assert_allclose(K1, K2) + + # Test with sparse X and Y + X_sparse = csr_container(X) + Y_sparse = csr_container(Y) + if metric in ["chi2", "additive_chi2"]: + # these don't support sparse matrices yet + return + K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric) + assert_allclose(K1, K2) + + +def test_pairwise_kernels_callable(): + # Test the pairwise_kernels helper function + # with a callable function, with given keywords. 
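A small sketch (not part of this diff; `my_rbf` and the data are arbitrary) of the behaviour the callable-kernel test below relies on: a user-supplied callable passed to `pairwise_kernels` receives pairs of rows plus the given keyword arguments, so it can reproduce `rbf_kernel` exactly.

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels, rbf_kernel

rng = np.random.RandomState(0)
X, Y = rng.random_sample((5, 4)), rng.random_sample((2, 4))

def my_rbf(x, y, gamma=0.1):
    # x and y are single rows of X and Y
    return np.exp(-gamma * np.sum((x - y) ** 2))

K1 = pairwise_kernels(X, Y, metric=my_rbf, gamma=0.5)
K2 = rbf_kernel(X, Y, gamma=0.5)
np.testing.assert_allclose(K1, K2)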
+ rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + Y = rng.random_sample((2, 4)) + + metric = callable_rbf_kernel + kwds = {"gamma": 0.1} + K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds) + K2 = rbf_kernel(X, Y=Y, **kwds) + assert_allclose(K1, K2) + + # callable function, X=Y + K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds) + K2 = rbf_kernel(X, Y=X, **kwds) + assert_allclose(K1, K2) + + +def test_pairwise_kernels_filter_param(): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + Y = rng.random_sample((2, 4)) + K = rbf_kernel(X, Y, gamma=0.1) + params = {"gamma": 0.1, "blabla": ":)"} + K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params) + assert_allclose(K, K2) + + with pytest.raises(TypeError): + pairwise_kernels(X, Y, metric="rbf", **params) + + +@pytest.mark.parametrize("metric, func", PAIRED_DISTANCES.items()) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_paired_distances(metric, func, csr_container): + # Test the pairwise_distance helper function. + rng = np.random.RandomState(0) + # Euclidean distance should be equivalent to calling the function. + X = rng.random_sample((5, 4)) + # Euclidean distance, with Y != X. + Y = rng.random_sample((5, 4)) + + S = paired_distances(X, Y, metric=metric) + S2 = func(X, Y) + assert_allclose(S, S2) + S3 = func(csr_container(X), csr_container(Y)) + assert_allclose(S, S3) + if metric in PAIRWISE_DISTANCE_FUNCTIONS: + # Check the pairwise_distances implementation + # gives the same value + distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y) + distances = np.diag(distances) + assert_allclose(distances, S) + + +def test_paired_distances_callable(global_dtype): + # Test the paired_distance helper function + # with the callable implementation + rng = np.random.RandomState(0) + # Euclidean distance should be equivalent to calling the function. + X = rng.random_sample((5, 4)).astype(global_dtype, copy=False) + # Euclidean distance, with Y != X. 
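For reference, a tiny sketch (values arbitrary) of the paired-distance semantics tested here: `paired_distances` returns one distance per row pair, i.e. between X[i] and Y[i], rather than a full distance matrix.

import numpy as np
from sklearn.metrics.pairwise import paired_distances

X = np.array([[0.0, 0.0], [1.0, 1.0]])
Y = np.array([[3.0, 4.0], [1.0, 1.0]])
# Row-wise euclidean distances: ||X[i] - Y[i]||
np.testing.assert_allclose(paired_distances(X, Y), [5.0, 0.0])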
+ Y = rng.random_sample((5, 4)).astype(global_dtype, copy=False) + + S = paired_distances(X, Y, metric="manhattan") + S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0)) + assert_allclose(S, S2) + + # Test that a value error is raised when the lengths of X and Y should not + # differ + Y = rng.random_sample((3, 4)) + with pytest.raises(ValueError): + paired_distances(X, Y) + + +@pytest.mark.parametrize("dok_container", DOK_CONTAINERS) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pairwise_distances_argmin_min(dok_container, csr_container, global_dtype): + # Check pairwise minimum distances computation for any metric + X = np.asarray([[0], [1]], dtype=global_dtype) + Y = np.asarray([[-2], [3]], dtype=global_dtype) + + Xsp = dok_container(X) + Ysp = csr_container(Y, dtype=global_dtype) + + expected_idx = [0, 1] + expected_vals = [2, 2] + expected_vals_sq = [4, 4] + + # euclidean metric + idx, vals = pairwise_distances_argmin_min(X, Y, metric="euclidean") + idx2 = pairwise_distances_argmin(X, Y, metric="euclidean") + assert_allclose(idx, expected_idx) + assert_allclose(idx2, expected_idx) + assert_allclose(vals, expected_vals) + # sparse matrix case + idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean") + idxsp2 = pairwise_distances_argmin(Xsp, Ysp, metric="euclidean") + assert_allclose(idxsp, expected_idx) + assert_allclose(idxsp2, expected_idx) + assert_allclose(valssp, expected_vals) + # We don't want np.matrix here + assert type(idxsp) == np.ndarray + assert type(valssp) == np.ndarray + + # Squared Euclidean metric + idx, vals = pairwise_distances_argmin_min(X, Y, metric="sqeuclidean") + idx2, vals2 = pairwise_distances_argmin_min( + X, Y, metric="euclidean", metric_kwargs={"squared": True} + ) + idx3 = pairwise_distances_argmin(X, Y, metric="sqeuclidean") + idx4 = pairwise_distances_argmin( + X, Y, metric="euclidean", metric_kwargs={"squared": True} + ) + + assert_allclose(vals, expected_vals_sq) + assert_allclose(vals2, expected_vals_sq) + + assert_allclose(idx, expected_idx) + assert_allclose(idx2, expected_idx) + assert_allclose(idx3, expected_idx) + assert_allclose(idx4, expected_idx) + + # Non-euclidean scikit-learn metric + idx, vals = pairwise_distances_argmin_min(X, Y, metric="manhattan") + idx2 = pairwise_distances_argmin(X, Y, metric="manhattan") + assert_allclose(idx, expected_idx) + assert_allclose(idx2, expected_idx) + assert_allclose(vals, expected_vals) + # sparse matrix case + idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan") + idxsp2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan") + assert_allclose(idxsp, expected_idx) + assert_allclose(idxsp2, expected_idx) + assert_allclose(valssp, expected_vals) + + # Non-euclidean Scipy distance (callable) + idx, vals = pairwise_distances_argmin_min( + X, Y, metric=minkowski, metric_kwargs={"p": 2} + ) + assert_allclose(idx, expected_idx) + assert_allclose(vals, expected_vals) + + # Non-euclidean Scipy distance (string) + idx, vals = pairwise_distances_argmin_min( + X, Y, metric="minkowski", metric_kwargs={"p": 2} + ) + assert_allclose(idx, expected_idx) + assert_allclose(vals, expected_vals) + + # Compare with naive implementation + rng = np.random.RandomState(0) + X = rng.randn(97, 149) + Y = rng.randn(111, 149) + + dist = pairwise_distances(X, Y, metric="manhattan") + dist_orig_ind = dist.argmin(axis=0) + dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))] + + dist_chunked_ind, dist_chunked_val = 
pairwise_distances_argmin_min( + X, Y, axis=0, metric="manhattan" + ) + assert_allclose(dist_orig_ind, dist_chunked_ind, rtol=1e-7) + assert_allclose(dist_orig_val, dist_chunked_val, rtol=1e-7) + + # Changing the axis and permuting datasets must give the same results + argmin_0, dist_0 = pairwise_distances_argmin_min(X, Y, axis=0) + argmin_1, dist_1 = pairwise_distances_argmin_min(Y, X, axis=1) + + assert_allclose(dist_0, dist_1) + assert_array_equal(argmin_0, argmin_1) + + argmin_0, dist_0 = pairwise_distances_argmin_min(X, X, axis=0) + argmin_1, dist_1 = pairwise_distances_argmin_min(X, X, axis=1) + + assert_allclose(dist_0, dist_1) + assert_array_equal(argmin_0, argmin_1) + + # Changing the axis and permuting datasets must give the same results + argmin_0 = pairwise_distances_argmin(X, Y, axis=0) + argmin_1 = pairwise_distances_argmin(Y, X, axis=1) + + assert_array_equal(argmin_0, argmin_1) + + argmin_0 = pairwise_distances_argmin(X, X, axis=0) + argmin_1 = pairwise_distances_argmin(X, X, axis=1) + + assert_array_equal(argmin_0, argmin_1) + + # F-contiguous arrays must be supported and must return identical results. + argmin_C_contiguous = pairwise_distances_argmin(X, Y) + argmin_F_contiguous = pairwise_distances_argmin( + np.asfortranarray(X), np.asfortranarray(Y) + ) + + assert_array_equal(argmin_C_contiguous, argmin_F_contiguous) + + +def _reduce_func(dist, start): + return dist[:, :100] + + +def test_pairwise_distances_chunked_reduce(global_dtype): + rng = np.random.RandomState(0) + X = rng.random_sample((400, 4)).astype(global_dtype, copy=False) + # Reduced Euclidean distance + S = pairwise_distances(X)[:, :100] + S_chunks = pairwise_distances_chunked( + X, None, reduce_func=_reduce_func, working_memory=2**-16 + ) + assert isinstance(S_chunks, GeneratorType) + S_chunks = list(S_chunks) + assert len(S_chunks) > 1 + assert S_chunks[0].dtype == X.dtype + + # atol is for diagonal where S is explicitly zeroed on the diagonal + assert_allclose(np.vstack(S_chunks), S, atol=1e-7) + + +def test_pairwise_distances_chunked_reduce_none(global_dtype): + # check that the reduce func is allowed to return None + rng = np.random.RandomState(0) + X = rng.random_sample((10, 4)).astype(global_dtype, copy=False) + S_chunks = pairwise_distances_chunked( + X, None, reduce_func=lambda dist, start: None, working_memory=2**-16 + ) + assert isinstance(S_chunks, GeneratorType) + S_chunks = list(S_chunks) + assert len(S_chunks) > 1 + assert all(chunk is None for chunk in S_chunks) + + +@pytest.mark.parametrize( + "good_reduce", + [ + lambda D, start: list(D), + lambda D, start: np.array(D), + lambda D, start: (list(D), list(D)), + ] + + [ + lambda D, start, scipy_csr_type=scipy_csr_type: scipy_csr_type(D) + for scipy_csr_type in CSR_CONTAINERS + ] + + [ + lambda D, start, scipy_dok_type=scipy_dok_type: ( + scipy_dok_type(D), + np.array(D), + list(D), + ) + for scipy_dok_type in DOK_CONTAINERS + ], +) +def test_pairwise_distances_chunked_reduce_valid(good_reduce): + X = np.arange(10).reshape(-1, 1) + S_chunks = pairwise_distances_chunked( + X, None, reduce_func=good_reduce, working_memory=64 + ) + next(S_chunks) + + +@pytest.mark.parametrize( + ("bad_reduce", "err_type", "message"), + [ + ( + lambda D, s: np.concatenate([D, D[-1:]]), + ValueError, + r"length 11\..* input: 10\.", + ), + ( + lambda D, s: (D, np.concatenate([D, D[-1:]])), + ValueError, + r"length \(10, 11\)\..* input: 10\.", + ), + (lambda D, s: (D[:9], D), ValueError, r"length \(9, 10\)\..* input: 10\."), + ( + lambda D, s: 7, + TypeError, + 
r"returned 7\. Expected sequence\(s\) of length 10\.", + ), + ( + lambda D, s: (7, 8), + TypeError, + r"returned \(7, 8\)\. Expected sequence\(s\) of length 10\.", + ), + ( + lambda D, s: (np.arange(10), 9), + TypeError, + r", 9\)\. Expected sequence\(s\) of length 10\.", + ), + ], +) +def test_pairwise_distances_chunked_reduce_invalid( + global_dtype, bad_reduce, err_type, message +): + X = np.arange(10).reshape(-1, 1).astype(global_dtype, copy=False) + S_chunks = pairwise_distances_chunked( + X, None, reduce_func=bad_reduce, working_memory=64 + ) + with pytest.raises(err_type, match=message): + next(S_chunks) + + +def check_pairwise_distances_chunked(X, Y, working_memory, metric="euclidean"): + gen = pairwise_distances_chunked(X, Y, working_memory=working_memory, metric=metric) + assert isinstance(gen, GeneratorType) + blockwise_distances = list(gen) + Y = X if Y is None else Y + min_block_mib = len(Y) * 8 * 2**-20 + + for block in blockwise_distances: + memory_used = block.nbytes + assert memory_used <= max(working_memory, min_block_mib) * 2**20 + + blockwise_distances = np.vstack(blockwise_distances) + S = pairwise_distances(X, Y, metric=metric) + assert_allclose(blockwise_distances, S, atol=1e-7) + + +@pytest.mark.parametrize("metric", ("euclidean", "l2", "sqeuclidean")) +def test_pairwise_distances_chunked_diagonal(metric, global_dtype): + rng = np.random.RandomState(0) + X = rng.normal(size=(1000, 10), scale=1e10).astype(global_dtype, copy=False) + chunks = list(pairwise_distances_chunked(X, working_memory=1, metric=metric)) + assert len(chunks) > 1 + assert_allclose(np.diag(np.vstack(chunks)), 0, rtol=1e-10) + + +@pytest.mark.parametrize("metric", ("euclidean", "l2", "sqeuclidean")) +def test_parallel_pairwise_distances_diagonal(metric, global_dtype): + rng = np.random.RandomState(0) + X = rng.normal(size=(1000, 10), scale=1e10).astype(global_dtype, copy=False) + distances = pairwise_distances(X, metric=metric, n_jobs=2) + assert_allclose(np.diag(distances), 0, atol=1e-10) + + +@ignore_warnings +def test_pairwise_distances_chunked(global_dtype): + # Test the pairwise_distance helper function. + rng = np.random.RandomState(0) + # Euclidean distance should be equivalent to calling the function. + X = rng.random_sample((200, 4)).astype(global_dtype, copy=False) + check_pairwise_distances_chunked(X, None, working_memory=1, metric="euclidean") + # Test small amounts of memory + for power in range(-16, 0): + check_pairwise_distances_chunked( + X, None, working_memory=2**power, metric="euclidean" + ) + # X as list + check_pairwise_distances_chunked( + X.tolist(), None, working_memory=1, metric="euclidean" + ) + # Euclidean distance, with Y != X. + Y = rng.random_sample((100, 4)).astype(global_dtype, copy=False) + check_pairwise_distances_chunked(X, Y, working_memory=1, metric="euclidean") + check_pairwise_distances_chunked( + X.tolist(), Y.tolist(), working_memory=1, metric="euclidean" + ) + # absurdly large working_memory + check_pairwise_distances_chunked(X, Y, working_memory=10000, metric="euclidean") + # "cityblock" uses scikit-learn metric, cityblock (function) is + # scipy.spatial. 
+ check_pairwise_distances_chunked(X, Y, working_memory=1, metric="cityblock") + + # Test precomputed returns all at once + D = pairwise_distances(X) + gen = pairwise_distances_chunked(D, working_memory=2**-16, metric="precomputed") + assert isinstance(gen, GeneratorType) + assert next(gen) is D + with pytest.raises(StopIteration): + next(gen) + + +@pytest.mark.parametrize( + "x_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +@pytest.mark.parametrize( + "y_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +def test_euclidean_distances_known_result(x_array_constr, y_array_constr): + # Check the pairwise Euclidean distances computation on known result + X = x_array_constr([[0]]) + Y = y_array_constr([[1], [2]]) + D = euclidean_distances(X, Y) + assert_allclose(D, [[1.0, 2.0]]) + + +@pytest.mark.parametrize( + "y_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +def test_euclidean_distances_with_norms(global_dtype, y_array_constr): + # check that we still get the right answers with {X,Y}_norm_squared + # and that we get a wrong answer with wrong {X,Y}_norm_squared + rng = np.random.RandomState(0) + X = rng.random_sample((10, 10)).astype(global_dtype, copy=False) + Y = rng.random_sample((20, 10)).astype(global_dtype, copy=False) + + # norms will only be used if their dtype is float64 + X_norm_sq = (X.astype(np.float64) ** 2).sum(axis=1).reshape(1, -1) + Y_norm_sq = (Y.astype(np.float64) ** 2).sum(axis=1).reshape(1, -1) + + Y = y_array_constr(Y) + + D1 = euclidean_distances(X, Y) + D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq) + D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq) + D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq, Y_norm_squared=Y_norm_sq) + assert_allclose(D2, D1) + assert_allclose(D3, D1) + assert_allclose(D4, D1) + + # check we get the wrong answer with wrong {X,Y}_norm_squared + wrong_D = euclidean_distances( + X, + Y, + X_norm_squared=np.zeros_like(X_norm_sq), + Y_norm_squared=np.zeros_like(Y_norm_sq), + ) + with pytest.raises(AssertionError): + assert_allclose(wrong_D, D1) + + +@pytest.mark.parametrize("symmetric", [True, False]) +def test_euclidean_distances_float32_norms(global_random_seed, symmetric): + # Non-regression test for #27621 + rng = np.random.RandomState(global_random_seed) + X = rng.random_sample((10, 10)) + Y = X if symmetric else rng.random_sample((20, 10)) + X_norm_sq = (X.astype(np.float32) ** 2).sum(axis=1).reshape(1, -1) + Y_norm_sq = (Y.astype(np.float32) ** 2).sum(axis=1).reshape(1, -1) + D1 = euclidean_distances(X, Y) + D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq) + D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq) + D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq, Y_norm_squared=Y_norm_sq) + assert_allclose(D2, D1) + assert_allclose(D3, D1) + assert_allclose(D4, D1) + + +def test_euclidean_distances_norm_shapes(): + # Check all accepted shapes for the norms or appropriate error messages. 
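A short sketch of the pattern covered by the norm-shape test below (arbitrary data): precomputed squared norms can be supplied in any of the accepted shapes and must not change the result.

import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

rng = np.random.RandomState(0)
X, Y = rng.random_sample((6, 3)), rng.random_sample((4, 3))

XX = (X ** 2).sum(axis=1)   # shape (6,); (6, 1) and (1, 6) are accepted as well
YY = (Y ** 2).sum(axis=1)
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=XX, Y_norm_squared=YY)
np.testing.assert_allclose(D1, D2)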
+ rng = np.random.RandomState(0) + X = rng.random_sample((10, 10)) + Y = rng.random_sample((20, 10)) + + X_norm_squared = (X**2).sum(axis=1) + Y_norm_squared = (Y**2).sum(axis=1) + + D1 = euclidean_distances( + X, Y, X_norm_squared=X_norm_squared, Y_norm_squared=Y_norm_squared + ) + D2 = euclidean_distances( + X, + Y, + X_norm_squared=X_norm_squared.reshape(-1, 1), + Y_norm_squared=Y_norm_squared.reshape(-1, 1), + ) + D3 = euclidean_distances( + X, + Y, + X_norm_squared=X_norm_squared.reshape(1, -1), + Y_norm_squared=Y_norm_squared.reshape(1, -1), + ) + + assert_allclose(D2, D1) + assert_allclose(D3, D1) + + with pytest.raises(ValueError, match="Incompatible dimensions for X"): + euclidean_distances(X, Y, X_norm_squared=X_norm_squared[:5]) + with pytest.raises(ValueError, match="Incompatible dimensions for Y"): + euclidean_distances(X, Y, Y_norm_squared=Y_norm_squared[:5]) + + +@pytest.mark.parametrize( + "x_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +@pytest.mark.parametrize( + "y_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +def test_euclidean_distances(global_dtype, x_array_constr, y_array_constr): + # check that euclidean distances gives same result as scipy cdist + # when X and Y != X are provided + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + X[X < 0.8] = 0 + Y = rng.random_sample((10, 10)).astype(global_dtype, copy=False) + Y[Y < 0.8] = 0 + + expected = cdist(X, Y) + + X = x_array_constr(X) + Y = y_array_constr(Y) + distances = euclidean_distances(X, Y) + + # the default rtol=1e-7 is too close to the float32 precision + # and fails due to rounding errors. + assert_allclose(distances, expected, rtol=1e-6) + assert distances.dtype == global_dtype + + +@pytest.mark.parametrize( + "x_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +def test_euclidean_distances_sym(global_dtype, x_array_constr): + # check that euclidean distances gives same result as scipy pdist + # when only X is provided + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(global_dtype, copy=False) + X[X < 0.8] = 0 + + expected = squareform(pdist(X)) + + X = x_array_constr(X) + distances = euclidean_distances(X) + + # the default rtol=1e-7 is too close to the float32 precision + # and fails due to rounding errors. 
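A minimal sketch of what the surrounding tests assert (arbitrary data): on float32 input, `euclidean_distances` upcasts chunks internally, so it matches scipy within a slightly loosened relative tolerance while preserving the input dtype.

import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics.pairwise import euclidean_distances

rng = np.random.RandomState(0)
X = rng.random_sample((50, 10)).astype(np.float32)

D = euclidean_distances(X)
np.testing.assert_allclose(D, squareform(pdist(X)), rtol=1e-6)
assert D.dtype == np.float32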
+ assert_allclose(distances, expected, rtol=1e-6) + assert distances.dtype == global_dtype + + +@pytest.mark.parametrize("batch_size", [None, 5, 7, 101]) +@pytest.mark.parametrize( + "x_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +@pytest.mark.parametrize( + "y_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +def test_euclidean_distances_upcast(batch_size, x_array_constr, y_array_constr): + # check batches handling when Y != X (#13910) + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(np.float32) + X[X < 0.8] = 0 + Y = rng.random_sample((10, 10)).astype(np.float32) + Y[Y < 0.8] = 0 + + expected = cdist(X, Y) + + X = x_array_constr(X) + Y = y_array_constr(Y) + distances = _euclidean_distances_upcast(X, Y=Y, batch_size=batch_size) + distances = np.sqrt(np.maximum(distances, 0)) + + # the default rtol=1e-7 is too close to the float32 precision + # and fails due to rounding errors. + assert_allclose(distances, expected, rtol=1e-6) + + +@pytest.mark.parametrize("batch_size", [None, 5, 7, 101]) +@pytest.mark.parametrize( + "x_array_constr", + [np.array] + CSR_CONTAINERS, + ids=["dense"] + [container.__name__ for container in CSR_CONTAINERS], +) +def test_euclidean_distances_upcast_sym(batch_size, x_array_constr): + # check batches handling when X is Y (#13910) + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)).astype(np.float32) + X[X < 0.8] = 0 + + expected = squareform(pdist(X)) + + X = x_array_constr(X) + distances = _euclidean_distances_upcast(X, Y=X, batch_size=batch_size) + distances = np.sqrt(np.maximum(distances, 0)) + + # the default rtol=1e-7 is too close to the float32 precision + # and fails due to rounding errors. + assert_allclose(distances, expected, rtol=1e-6) + + +@pytest.mark.parametrize( + "dtype, eps, rtol", + [ + (np.float32, 1e-4, 1e-5), + pytest.param( + np.float64, + 1e-8, + 0.99, + marks=pytest.mark.xfail(reason="failing due to lack of precision"), + ), + ], +) +@pytest.mark.parametrize("dim", [1, 1000000]) +def test_euclidean_distances_extreme_values(dtype, eps, rtol, dim): + # check that euclidean distances is correct with float32 input thanks to + # upcasting. On float64 there are still precision issues. + X = np.array([[1.0] * dim], dtype=dtype) + Y = np.array([[1.0 + eps] * dim], dtype=dtype) + + distances = euclidean_distances(X, Y) + expected = cdist(X, Y) + + assert_allclose(distances, expected, rtol=1e-5) + + +@pytest.mark.parametrize("squared", [True, False]) +def test_nan_euclidean_distances_equal_to_euclidean_distance(squared): + # with no nan values + rng = np.random.RandomState(1337) + X = rng.randn(3, 4) + Y = rng.randn(4, 4) + + normal_distance = euclidean_distances(X, Y=Y, squared=squared) + nan_distance = nan_euclidean_distances(X, Y=Y, squared=squared) + assert_allclose(normal_distance, nan_distance) + + +@pytest.mark.parametrize("X", [np.array([[np.inf, 0]]), np.array([[0, -np.inf]])]) +@pytest.mark.parametrize("Y", [np.array([[np.inf, 0]]), np.array([[0, -np.inf]]), None]) +def test_nan_euclidean_distances_infinite_values(X, Y): + with pytest.raises(ValueError) as excinfo: + nan_euclidean_distances(X, Y=Y) + + exp_msg = "Input contains infinity or a value too large for dtype('float64')." 
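For context, a small sketch of the nan-aware distance covered by these tests (values arbitrary): coordinates missing in either sample are skipped, and the squared distance over the remaining coordinates is rescaled by n_features / n_present before the square root.

import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances

X = np.array([[0.0, np.nan, 2.0],
              [1.0, 1.0, 2.0]])
# Only coordinates 0 and 2 are present in both rows:
# squared distance = (0 - 1)**2 + (2 - 2)**2 = 1, rescaled by 3 / 2.
D = nan_euclidean_distances(X)
assert np.isclose(D[0, 1], np.sqrt(3 / 2))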
+ assert exp_msg == str(excinfo.value) + + +@pytest.mark.parametrize( + "X, X_diag, missing_value", + [ + (np.array([[0, 1], [1, 0]]), np.sqrt(2), np.nan), + (np.array([[0, 1], [1, np.nan]]), np.sqrt(2), np.nan), + (np.array([[np.nan, 1], [1, np.nan]]), np.nan, np.nan), + (np.array([[np.nan, 1], [np.nan, 0]]), np.sqrt(2), np.nan), + (np.array([[0, np.nan], [1, np.nan]]), np.sqrt(2), np.nan), + (np.array([[0, 1], [1, 0]]), np.sqrt(2), -1), + (np.array([[0, 1], [1, -1]]), np.sqrt(2), -1), + (np.array([[-1, 1], [1, -1]]), np.nan, -1), + (np.array([[-1, 1], [-1, 0]]), np.sqrt(2), -1), + (np.array([[0, -1], [1, -1]]), np.sqrt(2), -1), + ], +) +def test_nan_euclidean_distances_2x2(X, X_diag, missing_value): + exp_dist = np.array([[0.0, X_diag], [X_diag, 0]]) + + dist = nan_euclidean_distances(X, missing_values=missing_value) + assert_allclose(exp_dist, dist) + + dist_sq = nan_euclidean_distances(X, squared=True, missing_values=missing_value) + assert_allclose(exp_dist**2, dist_sq) + + dist_two = nan_euclidean_distances(X, X, missing_values=missing_value) + assert_allclose(exp_dist, dist_two) + + dist_two_copy = nan_euclidean_distances(X, X.copy(), missing_values=missing_value) + assert_allclose(exp_dist, dist_two_copy) + + +@pytest.mark.parametrize("missing_value", [np.nan, -1]) +def test_nan_euclidean_distances_complete_nan(missing_value): + X = np.array([[missing_value, missing_value], [0, 1]]) + + exp_dist = np.array([[np.nan, np.nan], [np.nan, 0]]) + + dist = nan_euclidean_distances(X, missing_values=missing_value) + assert_allclose(exp_dist, dist) + + dist = nan_euclidean_distances(X, X.copy(), missing_values=missing_value) + assert_allclose(exp_dist, dist) + + +@pytest.mark.parametrize("missing_value", [np.nan, -1]) +def test_nan_euclidean_distances_not_trival(missing_value): + X = np.array( + [ + [1.0, missing_value, 3.0, 4.0, 2.0], + [missing_value, 4.0, 6.0, 1.0, missing_value], + [3.0, missing_value, missing_value, missing_value, 1.0], + ] + ) + + Y = np.array( + [ + [missing_value, 7.0, 7.0, missing_value, 2.0], + [missing_value, missing_value, 5.0, 4.0, 7.0], + [missing_value, missing_value, missing_value, 4.0, 5.0], + ] + ) + + # Check for symmetry + D1 = nan_euclidean_distances(X, Y, missing_values=missing_value) + D2 = nan_euclidean_distances(Y, X, missing_values=missing_value) + + assert_almost_equal(D1, D2.T) + + # Check with explicit formula and squared=True + assert_allclose( + nan_euclidean_distances( + X[:1], Y[:1], squared=True, missing_values=missing_value + ), + [[5.0 / 2.0 * ((7 - 3) ** 2 + (2 - 2) ** 2)]], + ) + + # Check with explicit formula and squared=False + assert_allclose( + nan_euclidean_distances( + X[1:2], Y[1:2], squared=False, missing_values=missing_value + ), + [[np.sqrt(5.0 / 2.0 * ((6 - 5) ** 2 + (1 - 4) ** 2))]], + ) + + # Check when Y = X is explicitly passed + D3 = nan_euclidean_distances(X, missing_values=missing_value) + D4 = nan_euclidean_distances(X, X, missing_values=missing_value) + D5 = nan_euclidean_distances(X, X.copy(), missing_values=missing_value) + assert_allclose(D3, D4) + assert_allclose(D4, D5) + + # Check copy = True against copy = False + D6 = nan_euclidean_distances(X, Y, copy=True) + D7 = nan_euclidean_distances(X, Y, copy=False) + assert_allclose(D6, D7) + + +@pytest.mark.parametrize("missing_value", [np.nan, -1]) +def test_nan_euclidean_distances_one_feature_match_positive(missing_value): + # First feature is the only feature that is non-nan and in both + # samples. 
The result of `nan_euclidean_distances` with squared=True + # should be non-negative. The non-squared version should all be close to 0. + X = np.array( + [ + [-122.27, 648.0, missing_value, 37.85], + [-122.27, missing_value, 2.34701493, missing_value], + ] + ) + + dist_squared = nan_euclidean_distances( + X, missing_values=missing_value, squared=True + ) + assert np.all(dist_squared >= 0) + + dist = nan_euclidean_distances(X, missing_values=missing_value, squared=False) + assert_allclose(dist, 0.0) + + +def test_cosine_distances(): + # Check the pairwise Cosine distances computation + rng = np.random.RandomState(1337) + x = np.abs(rng.rand(910)) + XA = np.vstack([x, x]) + D = cosine_distances(XA) + assert_allclose(D, [[0.0, 0.0], [0.0, 0.0]], atol=1e-10) + # check that all elements are in [0, 2] + assert np.all(D >= 0.0) + assert np.all(D <= 2.0) + # check that diagonal elements are equal to 0 + assert_allclose(D[np.diag_indices_from(D)], [0.0, 0.0]) + + XB = np.vstack([x, -x]) + D2 = cosine_distances(XB) + # check that all elements are in [0, 2] + assert np.all(D2 >= 0.0) + assert np.all(D2 <= 2.0) + # check that diagonal elements are equal to 0 and non diagonal to 2 + assert_allclose(D2, [[0.0, 2.0], [2.0, 0.0]]) + + # check large random matrix + X = np.abs(rng.rand(1000, 5000)) + D = cosine_distances(X) + # check that diagonal elements are equal to 0 + assert_allclose(D[np.diag_indices_from(D)], [0.0] * D.shape[0]) + assert np.all(D >= 0.0) + assert np.all(D <= 2.0) + + +def test_haversine_distances(): + # Check haversine distance with distances computation + def slow_haversine_distances(x, y): + diff_lat = y[0] - x[0] + diff_lon = y[1] - x[1] + a = np.sin(diff_lat / 2) ** 2 + ( + np.cos(x[0]) * np.cos(y[0]) * np.sin(diff_lon / 2) ** 2 + ) + c = 2 * np.arcsin(np.sqrt(a)) + return c + + rng = np.random.RandomState(0) + X = rng.random_sample((5, 2)) + Y = rng.random_sample((10, 2)) + D1 = np.array([[slow_haversine_distances(x, y) for y in Y] for x in X]) + D2 = haversine_distances(X, Y) + assert_allclose(D1, D2) + # Test haversine distance does not accept X where n_feature != 2 + X = rng.random_sample((10, 3)) + err_msg = "Haversine distance only valid in 2 dimensions" + with pytest.raises(ValueError, match=err_msg): + haversine_distances(X) + + +# Paired distances + + +def test_paired_euclidean_distances(): + # Check the paired Euclidean distances computation + X = [[0], [0]] + Y = [[1], [2]] + D = paired_euclidean_distances(X, Y) + assert_allclose(D, [1.0, 2.0]) + + +def test_paired_manhattan_distances(): + # Check the paired manhattan distances computation + X = [[0], [0]] + Y = [[1], [2]] + D = paired_manhattan_distances(X, Y) + assert_allclose(D, [1.0, 2.0]) + + +def test_paired_cosine_distances(): + # Check the paired manhattan distances computation + X = [[0], [0]] + Y = [[1], [2]] + D = paired_cosine_distances(X, Y) + assert_allclose(D, [0.5, 0.5]) + + +def test_chi_square_kernel(): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + Y = rng.random_sample((10, 4)) + K_add = additive_chi2_kernel(X, Y) + gamma = 0.1 + K = chi2_kernel(X, Y, gamma=gamma) + assert K.dtype == float + for i, x in enumerate(X): + for j, y in enumerate(Y): + chi2 = -np.sum((x - y) ** 2 / (x + y)) + chi2_exp = np.exp(gamma * chi2) + assert_almost_equal(K_add[i, j], chi2) + assert_almost_equal(K[i, j], chi2_exp) + + # check diagonal is ones for data with itself + K = chi2_kernel(Y) + assert_array_equal(np.diag(K), 1) + # check off-diagonal is < 1 but > 0: + assert np.all(K > 0) + assert 
np.all(K - np.diag(np.diag(K)) < 1) + # check that float32 is preserved + X = rng.random_sample((5, 4)).astype(np.float32) + Y = rng.random_sample((10, 4)).astype(np.float32) + K = chi2_kernel(X, Y) + assert K.dtype == np.float32 + + # check integer type gets converted, + # check that zeros are handled + X = rng.random_sample((10, 4)).astype(np.int32) + K = chi2_kernel(X, X) + assert np.isfinite(K).all() + assert K.dtype == float + + # check that kernel of similar things is greater than dissimilar ones + X = [[0.3, 0.7], [1.0, 0]] + Y = [[0, 1], [0.9, 0.1]] + K = chi2_kernel(X, Y) + assert K[0, 0] > K[0, 1] + assert K[1, 1] > K[1, 0] + + # test negative input + with pytest.raises(ValueError): + chi2_kernel([[0, -1]]) + with pytest.raises(ValueError): + chi2_kernel([[0, -1]], [[-1, -1]]) + with pytest.raises(ValueError): + chi2_kernel([[0, 1]], [[-1, -1]]) + + # different n_features in X and Y + with pytest.raises(ValueError): + chi2_kernel([[0, 1]], [[0.2, 0.2, 0.6]]) + + +@pytest.mark.parametrize( + "kernel", + ( + linear_kernel, + polynomial_kernel, + rbf_kernel, + laplacian_kernel, + sigmoid_kernel, + cosine_similarity, + ), +) +def test_kernel_symmetry(kernel): + # Valid kernels should be symmetric + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + K = kernel(X, X) + assert_allclose(K, K.T, 15) + + +@pytest.mark.parametrize( + "kernel", + ( + linear_kernel, + polynomial_kernel, + rbf_kernel, + laplacian_kernel, + sigmoid_kernel, + cosine_similarity, + ), +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_kernel_sparse(kernel, csr_container): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + X_sparse = csr_container(X) + K = kernel(X, X) + K2 = kernel(X_sparse, X_sparse) + assert_allclose(K, K2) + + +def test_linear_kernel(): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + K = linear_kernel(X, X) + # the diagonal elements of a linear kernel are their squared norm + assert_allclose(K.flat[::6], [linalg.norm(x) ** 2 for x in X]) + + +def test_rbf_kernel(): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + K = rbf_kernel(X, X) + # the diagonal elements of a rbf kernel are 1 + assert_allclose(K.flat[::6], np.ones(5)) + + +def test_laplacian_kernel(): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + K = laplacian_kernel(X, X) + # the diagonal elements of a laplacian kernel are 1 + assert_allclose(np.diag(K), np.ones(5)) + + # off-diagonal elements are < 1 but > 0: + assert np.all(K > 0) + assert np.all(K - np.diag(np.diag(K)) < 1) + + +@pytest.mark.parametrize( + "metric, pairwise_func", + [("linear", linear_kernel), ("cosine", cosine_similarity)], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pairwise_similarity_sparse_output(metric, pairwise_func, csr_container): + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + Y = rng.random_sample((3, 4)) + Xcsr = csr_container(X) + Ycsr = csr_container(Y) + + # should be sparse + K1 = pairwise_func(Xcsr, Ycsr, dense_output=False) + assert issparse(K1) + + # should be dense, and equal to K1 + K2 = pairwise_func(X, Y, dense_output=True) + assert not issparse(K2) + assert_allclose(K1.toarray(), K2) + + # show the kernel output equal to the sparse.toarray() + K3 = pairwise_kernels(X, Y=Y, metric=metric) + assert_allclose(K1.toarray(), K3) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cosine_similarity(csr_container): + # Test the cosine_similarity. 
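A brief sketch of the identity the cosine-similarity test below relies on (arbitrary data): cosine similarity equals a linear kernel computed on L2-normalized rows.

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
from sklearn.preprocessing import normalize

rng = np.random.RandomState(0)
X, Y = rng.random_sample((5, 4)), rng.random_sample((3, 4))

K1 = cosine_similarity(X, Y)
K2 = linear_kernel(normalize(X), normalize(Y))
np.testing.assert_allclose(K1, K2)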
+ + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + Y = rng.random_sample((3, 4)) + Xcsr = csr_container(X) + Ycsr = csr_container(Y) + + for X_, Y_ in ((X, None), (X, Y), (Xcsr, None), (Xcsr, Ycsr)): + # Test that the cosine is kernel is equal to a linear kernel when data + # has been previously normalized by L2-norm. + K1 = pairwise_kernels(X_, Y=Y_, metric="cosine") + X_ = normalize(X_) + if Y_ is not None: + Y_ = normalize(Y_) + K2 = pairwise_kernels(X_, Y=Y_, metric="linear") + assert_allclose(K1, K2) + + +def test_check_dense_matrices(): + # Ensure that pairwise array check works for dense matrices. + # Check that if XB is None, XB is returned as reference to XA + XA = np.resize(np.arange(40), (5, 8)) + XA_checked, XB_checked = check_pairwise_arrays(XA, None) + assert XA_checked is XB_checked + assert_array_equal(XA, XA_checked) + + +def test_check_XB_returned(): + # Ensure that if XA and XB are given correctly, they return as equal. + # Check that if XB is not None, it is returned equal. + # Note that the second dimension of XB is the same as XA. + XA = np.resize(np.arange(40), (5, 8)) + XB = np.resize(np.arange(32), (4, 8)) + XA_checked, XB_checked = check_pairwise_arrays(XA, XB) + assert_array_equal(XA, XA_checked) + assert_array_equal(XB, XB_checked) + + XB = np.resize(np.arange(40), (5, 8)) + XA_checked, XB_checked = check_paired_arrays(XA, XB) + assert_array_equal(XA, XA_checked) + assert_array_equal(XB, XB_checked) + + +def test_check_different_dimensions(): + # Ensure an error is raised if the dimensions are different. + XA = np.resize(np.arange(45), (5, 9)) + XB = np.resize(np.arange(32), (4, 8)) + with pytest.raises(ValueError): + check_pairwise_arrays(XA, XB) + + XB = np.resize(np.arange(4 * 9), (4, 9)) + with pytest.raises(ValueError): + check_paired_arrays(XA, XB) + + +def test_check_invalid_dimensions(): + # Ensure an error is raised on 1D input arrays. + # The modified tests are not 1D. In the old test, the array was internally + # converted to 2D anyways + XA = np.arange(45).reshape(9, 5) + XB = np.arange(32).reshape(4, 8) + with pytest.raises(ValueError): + check_pairwise_arrays(XA, XB) + XA = np.arange(45).reshape(9, 5) + XB = np.arange(32).reshape(4, 8) + with pytest.raises(ValueError): + check_pairwise_arrays(XA, XB) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_check_sparse_arrays(csr_container): + # Ensures that checks return valid sparse matrices. + rng = np.random.RandomState(0) + XA = rng.random_sample((5, 4)) + XA_sparse = csr_container(XA) + XB = rng.random_sample((5, 4)) + XB_sparse = csr_container(XB) + XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse) + # compare their difference because testing csr matrices for + # equality with '==' does not work as expected. + assert issparse(XA_checked) + assert abs(XA_sparse - XA_checked).sum() == 0 + assert issparse(XB_checked) + assert abs(XB_sparse - XB_checked).sum() == 0 + + XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse) + assert issparse(XA_checked) + assert abs(XA_sparse - XA_checked).sum() == 0 + assert issparse(XA_2_checked) + assert abs(XA_2_checked - XA_checked).sum() == 0 + + +def tuplify(X): + # Turns a numpy matrix (any n-dimensional array) into tuples. + s = X.shape + if len(s) > 1: + # Tuplify each sub-array in the input. + return tuple(tuplify(row) for row in X) + else: + # Single dimension input, just return tuple of contents. 
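A small usage sketch (inputs arbitrary) of the validation helper these checks target: `check_pairwise_arrays` converts array-likes such as nested tuples to ndarrays and, when the second argument is None, returns the same validated array twice.

import numpy as np
from sklearn.metrics.pairwise import check_pairwise_arrays

XA = ((0.0, 1.0), (2.0, 3.0))      # nested tuples are accepted
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert isinstance(XA_checked, np.ndarray)
assert XA_checked is XB_checked    # with XB=None, the validated XA is reused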
+ return tuple(r for r in X) + + +def test_check_tuple_input(): + # Ensures that checks return valid tuples. + rng = np.random.RandomState(0) + XA = rng.random_sample((5, 4)) + XA_tuples = tuplify(XA) + XB = rng.random_sample((5, 4)) + XB_tuples = tuplify(XB) + XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples) + assert_array_equal(XA_tuples, XA_checked) + assert_array_equal(XB_tuples, XB_checked) + + +def test_check_preserve_type(): + # Ensures that type float32 is preserved. + XA = np.resize(np.arange(40), (5, 8)).astype(np.float32) + XB = np.resize(np.arange(40), (5, 8)).astype(np.float32) + + XA_checked, XB_checked = check_pairwise_arrays(XA, None) + assert XA_checked.dtype == np.float32 + + # both float32 + XA_checked, XB_checked = check_pairwise_arrays(XA, XB) + assert XA_checked.dtype == np.float32 + assert XB_checked.dtype == np.float32 + + # mismatched A + XA_checked, XB_checked = check_pairwise_arrays(XA.astype(float), XB) + assert XA_checked.dtype == float + assert XB_checked.dtype == float + + # mismatched B + XA_checked, XB_checked = check_pairwise_arrays(XA, XB.astype(float)) + assert XA_checked.dtype == float + assert XB_checked.dtype == float + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("metric", ["seuclidean", "mahalanobis"]) +@pytest.mark.parametrize( + "dist_function", [pairwise_distances, pairwise_distances_chunked] +) +def test_pairwise_distances_data_derived_params(n_jobs, metric, dist_function): + # check that pairwise_distances give the same result in sequential and + # parallel, when metric has data-derived parameters. + with config_context(working_memory=0.1): # to have more than 1 chunk + rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)) + + expected_dist = squareform(pdist(X, metric=metric)) + dist = np.vstack(tuple(dist_function(X, metric=metric, n_jobs=n_jobs))) + + assert_allclose(dist, expected_dist) + + +@pytest.mark.parametrize("metric", ["seuclidean", "mahalanobis"]) +def test_pairwise_distances_data_derived_params_error(metric): + # check that pairwise_distances raises an error when Y is passed but + # metric has data-derived params that are not provided by the user. 
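A sketch of the usage implied by this test (data arbitrary): when Y differs from X, metrics with data-derived parameters such as "seuclidean" require the parameter to be passed explicitly, e.g. the per-feature variances V, otherwise a ValueError is raised.

import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X, Y = rng.random_sample((10, 3)), rng.random_sample((8, 3))

V = np.var(np.vstack([X, Y]), axis=0, ddof=1)
D = pairwise_distances(X, Y, metric="seuclidean", V=V)  # omitting V here raises ValueError
assert D.shape == (10, 8)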
+ rng = np.random.RandomState(0) + X = rng.random_sample((100, 10)) + Y = rng.random_sample((100, 10)) + + with pytest.raises( + ValueError, + match=rf"The '(V|VI)' parameter is required for the " rf"{metric} metric", + ): + pairwise_distances(X, Y, metric=metric) + + +@pytest.mark.parametrize( + "metric", + [ + "braycurtis", + "canberra", + "chebyshev", + "correlation", + "hamming", + "mahalanobis", + "minkowski", + "seuclidean", + "sqeuclidean", + "cityblock", + "cosine", + "euclidean", + ], +) +@pytest.mark.parametrize("y_is_x", [True, False], ids=["Y is X", "Y is not X"]) +def test_numeric_pairwise_distances_datatypes(metric, global_dtype, y_is_x): + # Check that pairwise distances gives the same result as pdist and cdist + # regardless of input datatype when using any scipy metric for comparing + # numeric vectors + # + # This test is necessary because pairwise_distances used to throw an + # error when using metric='seuclidean' and the input data was not + # of type np.float64 (#15730) + + rng = np.random.RandomState(0) + + X = rng.random_sample((5, 4)).astype(global_dtype, copy=False) + + params = {} + if y_is_x: + Y = X + expected_dist = squareform(pdist(X, metric=metric)) + else: + Y = rng.random_sample((5, 4)).astype(global_dtype, copy=False) + expected_dist = cdist(X, Y, metric=metric) + # precompute parameters for seuclidean & mahalanobis when x is not y + if metric == "seuclidean": + params = {"V": np.var(np.vstack([X, Y]), axis=0, ddof=1, dtype=np.float64)} + elif metric == "mahalanobis": + params = {"VI": np.linalg.inv(np.cov(np.vstack([X, Y]).T)).T} + + dist = pairwise_distances(X, Y, metric=metric, **params) + + assert_allclose(dist, expected_dist) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_manhattan_readonly_dataset(csr_container): + # Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/7981 + matrices1 = [csr_container(np.ones((5, 5)))] + matrices2 = [csr_container(np.ones((5, 5)))] + # Joblib memory maps datasets which makes them read-only. + # The following call was reporting as failing in #7981, but this must pass. + Parallel(n_jobs=2, max_nbytes=0)( + delayed(manhattan_distances)(m1, m2) for m1, m2 in zip(matrices1, matrices2) + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_pairwise_distances_reduction.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_pairwise_distances_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..e5983f9273d9476bf31fb596966b3295b57aa4e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_pairwise_distances_reduction.py @@ -0,0 +1,1643 @@ +import itertools +import re +import warnings +from functools import partial + +import numpy as np +import pytest +import threadpoolctl +from scipy.spatial.distance import cdist + +from sklearn.metrics import euclidean_distances, pairwise_distances +from sklearn.metrics._pairwise_distances_reduction import ( + ArgKmin, + ArgKminClassMode, + BaseDistancesReductionDispatcher, + RadiusNeighbors, + RadiusNeighborsClassMode, + sqeuclidean_row_norms, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, + create_memmap_backed_data, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +# Common supported metric between scipy.spatial.distance.cdist +# and BaseDistanceReductionDispatcher. 
+# This allows constructing tests to check consistency of results +# of concrete BaseDistanceReductionDispatcher on some metrics using APIs +# from scipy and numpy. +CDIST_PAIRWISE_DISTANCES_REDUCTION_COMMON_METRICS = [ + "braycurtis", + "canberra", + "chebyshev", + "cityblock", + "euclidean", + "minkowski", + "seuclidean", +] + + +def _get_metric_params_list(metric: str, n_features: int, seed: int = 1): + """Return list of dummy DistanceMetric kwargs for tests.""" + + # Distinguishing on cases not to compute unneeded datastructures. + rng = np.random.RandomState(seed) + + if metric == "minkowski": + minkowski_kwargs = [ + dict(p=1.5), + dict(p=2), + dict(p=3), + dict(p=np.inf), + dict(p=3, w=rng.rand(n_features)), + ] + + return minkowski_kwargs + + if metric == "seuclidean": + return [dict(V=rng.rand(n_features))] + + # Case of: "euclidean", "manhattan", "chebyshev", "haversine" or any other metric. + # In those cases, no kwargs is needed. + return [{}] + + +def assert_same_distances_for_common_neighbors( + query_idx, + dist_row_a, + dist_row_b, + indices_row_a, + indices_row_b, + rtol, + atol, +): + """Check that the distances of common neighbors are equal up to tolerance. + + This does not check if there are missing neighbors in either result set. + Missingness is handled by assert_no_missing_neighbors. + """ + # Compute a mapping from indices to distances for each result set and + # check that the computed neighbors with matching indices are within + # the expected distance tolerance. + indices_to_dist_a = dict(zip(indices_row_a, dist_row_a)) + indices_to_dist_b = dict(zip(indices_row_b, dist_row_b)) + + common_indices = set(indices_row_a).intersection(set(indices_row_b)) + for idx in common_indices: + dist_a = indices_to_dist_a[idx] + dist_b = indices_to_dist_b[idx] + try: + assert_allclose(dist_a, dist_b, rtol=rtol, atol=atol) + except AssertionError as e: + # Wrap exception to provide more context while also including + # the original exception with the computed absolute and + # relative differences. + raise AssertionError( + f"Query vector with index {query_idx} lead to different distances" + f" for common neighbor with index {idx}:" + f" dist_a={dist_a} vs dist_b={dist_b} (with atol={atol} and" + f" rtol={rtol})" + ) from e + + +def assert_no_missing_neighbors( + query_idx, + dist_row_a, + dist_row_b, + indices_row_a, + indices_row_b, + threshold, +): + """Compare the indices of neighbors in two results sets. + + Any neighbor index with a distance below the precision threshold should + match one in the other result set. We ignore the last few neighbors beyond + the threshold as those can typically be missing due to rounding errors. + + For radius queries, the threshold is just the radius minus the expected + precision level. + + For k-NN queries, it is the maximum distance to the k-th neighbor minus the + expected precision level. 
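+
+ Concretely, the callers compute this threshold as
+ (1 - rtol) * dist_k - atol for k-NN queries and
+ (1 - rtol) * radius - atol for radius queries.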
+ """ + mask_a = dist_row_a < threshold + mask_b = dist_row_b < threshold + missing_from_b = np.setdiff1d(indices_row_a[mask_a], indices_row_b) + missing_from_a = np.setdiff1d(indices_row_b[mask_b], indices_row_a) + if len(missing_from_a) > 0 or len(missing_from_b) > 0: + raise AssertionError( + f"Query vector with index {query_idx} lead to mismatched result indices:\n" + f"neighbors in b missing from a: {missing_from_a}\n" + f"neighbors in a missing from b: {missing_from_b}\n" + f"dist_row_a={dist_row_a}\n" + f"dist_row_b={dist_row_b}\n" + f"indices_row_a={indices_row_a}\n" + f"indices_row_b={indices_row_b}\n" + ) + + +def assert_compatible_argkmin_results( + neighbors_dists_a, + neighbors_dists_b, + neighbors_indices_a, + neighbors_indices_b, + rtol=1e-5, + atol=1e-6, +): + """Assert that argkmin results are valid up to rounding errors. + + This function asserts that the results of argkmin queries are valid up to: + - rounding error tolerance on distance values; + - permutations of indices for distances values that differ up to the + expected precision level. + + Furthermore, the distances must be sorted. + + To be used for testing neighbors queries on float32 datasets: we accept + neighbors rank swaps only if they are caused by small rounding errors on + the distance computations. + """ + is_sorted = lambda a: np.all(a[:-1] <= a[1:]) + + assert ( + neighbors_dists_a.shape + == neighbors_dists_b.shape + == neighbors_indices_a.shape + == neighbors_indices_b.shape + ), "Arrays of results have incompatible shapes." + + n_queries, _ = neighbors_dists_a.shape + + # Asserting equality results one row at a time + for query_idx in range(n_queries): + dist_row_a = neighbors_dists_a[query_idx] + dist_row_b = neighbors_dists_b[query_idx] + indices_row_a = neighbors_indices_a[query_idx] + indices_row_b = neighbors_indices_b[query_idx] + + assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}" + assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}" + + assert_same_distances_for_common_neighbors( + query_idx, + dist_row_a, + dist_row_b, + indices_row_a, + indices_row_b, + rtol, + atol, + ) + + # Check that any neighbor with distances below the rounding error + # threshold have matching indices. The threshold is the distance to the + # k-th neighbors minus the expected precision level: + # + # (1 - rtol) * dist_k - atol + # + # Where dist_k is defined as the maximum distance to the kth-neighbor + # among the two result sets. This way of defining the threshold is + # stricter than taking the minimum of the two. + threshold = (1 - rtol) * np.maximum( + np.max(dist_row_a), np.max(dist_row_b) + ) - atol + assert_no_missing_neighbors( + query_idx, + dist_row_a, + dist_row_b, + indices_row_a, + indices_row_b, + threshold, + ) + + +def _non_trivial_radius( + *, + X=None, + Y=None, + metric=None, + precomputed_dists=None, + expected_n_neighbors=10, + n_subsampled_queries=10, + **metric_kwargs, +): + # Find a non-trivial radius using a small subsample of the pairwise + # distances between X and Y: we want to return around expected_n_neighbors + # on average. Yielding too many results would make the test slow (because + # checking the results is expensive for large result sets), yielding 0 most + # of the time would make the test useless. + assert ( + precomputed_dists is not None or metric is not None + ), "Either metric or precomputed_dists must be provided." 
+ + if precomputed_dists is None: + assert X is not None + assert Y is not None + sampled_dists = pairwise_distances(X, Y, metric=metric, **metric_kwargs) + else: + sampled_dists = precomputed_dists[:n_subsampled_queries].copy() + sampled_dists.sort(axis=1) + return sampled_dists[:, expected_n_neighbors].mean() + + +def assert_compatible_radius_results( + neighbors_dists_a, + neighbors_dists_b, + neighbors_indices_a, + neighbors_indices_b, + radius, + check_sorted=True, + rtol=1e-5, + atol=1e-6, +): + """Assert that radius neighborhood results are valid up to: + + - relative and absolute tolerance on computed distance values + - permutations of indices for distances values that differ up to + a precision level + - missing or extra last elements if their distance is + close to the radius + + To be used for testing neighbors queries on float32 datasets: we + accept neighbors rank swaps only if they are caused by small + rounding errors on the distance computations. + + Input arrays must be sorted w.r.t distances. + """ + is_sorted = lambda a: np.all(a[:-1] <= a[1:]) + + assert ( + len(neighbors_dists_a) + == len(neighbors_dists_b) + == len(neighbors_indices_a) + == len(neighbors_indices_b) + ) + + n_queries = len(neighbors_dists_a) + + # Asserting equality of results one vector at a time + for query_idx in range(n_queries): + dist_row_a = neighbors_dists_a[query_idx] + dist_row_b = neighbors_dists_b[query_idx] + indices_row_a = neighbors_indices_a[query_idx] + indices_row_b = neighbors_indices_b[query_idx] + + if check_sorted: + assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}" + assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}" + + assert len(dist_row_a) == len(indices_row_a) + assert len(dist_row_b) == len(indices_row_b) + + # Check that all distances are within the requested radius + if len(dist_row_a) > 0: + max_dist_a = np.max(dist_row_a) + assert max_dist_a <= radius, ( + f"Largest returned distance {max_dist_a} not within requested" + f" radius {radius} on row {query_idx}" + ) + if len(dist_row_b) > 0: + max_dist_b = np.max(dist_row_b) + assert max_dist_b <= radius, ( + f"Largest returned distance {max_dist_b} not within requested" + f" radius {radius} on row {query_idx}" + ) + + assert_same_distances_for_common_neighbors( + query_idx, + dist_row_a, + dist_row_b, + indices_row_a, + indices_row_b, + rtol, + atol, + ) + + threshold = (1 - rtol) * radius - atol + assert_no_missing_neighbors( + query_idx, + dist_row_a, + dist_row_b, + indices_row_a, + indices_row_b, + threshold, + ) + + +FLOAT32_TOLS = { + "atol": 1e-7, + "rtol": 1e-5, +} +FLOAT64_TOLS = { + "atol": 1e-9, + "rtol": 1e-7, +} +ASSERT_RESULT = { + (ArgKmin, np.float64): partial(assert_compatible_argkmin_results, **FLOAT64_TOLS), + (ArgKmin, np.float32): partial(assert_compatible_argkmin_results, **FLOAT32_TOLS), + ( + RadiusNeighbors, + np.float64, + ): partial(assert_compatible_radius_results, **FLOAT64_TOLS), + ( + RadiusNeighbors, + np.float32, + ): partial(assert_compatible_radius_results, **FLOAT32_TOLS), +} + + +def test_assert_compatible_argkmin_results(): + atol = 1e-7 + rtol = 0.0 + tols = dict(atol=atol, rtol=rtol) + + eps = atol / 3 + _1m = 1.0 - eps + _1p = 1.0 + eps + + _6_1m = 6.1 - eps + _6_1p = 6.1 + eps + + ref_dist = np.array( + [ + [1.2, 2.5, _6_1m, 6.1, _6_1p], + [_1m, _1m, 1, _1p, _1p], + ] + ) + ref_indices = np.array( + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + ] + ) + + # Sanity check: compare the reference results to themselves. 
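+ # Identical inputs must always be accepted, whatever tolerances are used.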
+ assert_compatible_argkmin_results( + ref_dist, ref_dist, ref_indices, ref_indices, rtol + ) + + # Apply valid permutation on indices: the last 3 points are all very close + # to one another so we accept any permutation on their rankings. + assert_compatible_argkmin_results( + np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]), + np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]), + np.array([[1, 2, 3, 4, 5]]), + np.array([[1, 2, 5, 4, 3]]), + **tols, + ) + + # The last few indices do not necessarily have to match because of the rounding + # errors on the distances: there could be tied results at the boundary. + assert_compatible_argkmin_results( + np.array([[1.2, 2.5, 3.0, 6.1, _6_1p]]), + np.array([[1.2, 2.5, 3.0, _6_1m, 6.1]]), + np.array([[1, 2, 3, 4, 5]]), + np.array([[1, 2, 3, 6, 7]]), + **tols, + ) + + # All points have close distances so any ranking permutation + # is valid for this query result. + assert_compatible_argkmin_results( + np.array([[_1m, 1, _1p, _1p, _1p]]), + np.array([[1, 1, 1, 1, _1p]]), + np.array([[7, 6, 8, 10, 9]]), + np.array([[6, 9, 7, 8, 10]]), + **tols, + ) + + # They could also be nearly truncation of very large nearly tied result + # sets hence all indices can also be distinct in this case: + assert_compatible_argkmin_results( + np.array([[_1m, 1, _1p, _1p, _1p]]), + np.array([[_1m, 1, 1, 1, _1p]]), + np.array([[34, 30, 8, 12, 24]]), + np.array([[42, 1, 21, 13, 3]]), + **tols, + ) + + # Apply invalid permutation on indices: permuting the ranks of the 2 + # nearest neighbors is invalid because the distance values are too + # different. + msg = re.escape( + "Query vector with index 0 lead to different distances for common neighbor with" + " index 1: dist_a=1.2 vs dist_b=2.5" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_argkmin_results( + np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]), + np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]), + np.array([[1, 2, 3, 4, 5]]), + np.array([[2, 1, 3, 4, 5]]), + **tols, + ) + + # Detect missing indices within the expected precision level, even when the + # distances match exactly. + msg = re.escape( + "neighbors in b missing from a: [12]\nneighbors in a missing from b: [1]" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_argkmin_results( + np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]), + np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]), + np.array([[1, 2, 3, 4, 5]]), + np.array([[12, 2, 4, 11, 3]]), + **tols, + ) + + # Detect missing indices outside the expected precision level. 
+ msg = re.escape( + "neighbors in b missing from a: []\nneighbors in a missing from b: [3]" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_argkmin_results( + np.array([[_1m, 1.0, _6_1m, 6.1, _6_1p]]), + np.array([[1.0, 1.0, _6_1m, 6.1, 7]]), + np.array([[1, 2, 3, 4, 5]]), + np.array([[2, 1, 4, 5, 12]]), + **tols, + ) + + # Detect missing indices outside the expected precision level, in the other + # direction: + msg = re.escape( + "neighbors in b missing from a: [5]\nneighbors in a missing from b: []" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_argkmin_results( + np.array([[_1m, 1.0, _6_1m, 6.1, 7]]), + np.array([[1.0, 1.0, _6_1m, 6.1, _6_1p]]), + np.array([[1, 2, 3, 4, 12]]), + np.array([[2, 1, 5, 3, 4]]), + **tols, + ) + + # Distances aren't properly sorted + msg = "Distances aren't sorted on row 0" + with pytest.raises(AssertionError, match=msg): + assert_compatible_argkmin_results( + np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]), + np.array([[2.5, 1.2, _6_1m, 6.1, _6_1p]]), + np.array([[1, 2, 3, 4, 5]]), + np.array([[2, 1, 4, 5, 3]]), + **tols, + ) + + +@pytest.mark.parametrize("check_sorted", [True, False]) +def test_assert_compatible_radius_results(check_sorted): + atol = 1e-7 + rtol = 0.0 + tols = dict(atol=atol, rtol=rtol) + + eps = atol / 3 + _1m = 1.0 - eps + _1p = 1.0 + eps + _6_1m = 6.1 - eps + _6_1p = 6.1 + eps + + ref_dist = [ + np.array([1.2, 2.5, _6_1m, 6.1, _6_1p]), + np.array([_1m, 1, _1p, _1p]), + ] + + ref_indices = [ + np.array([1, 2, 3, 4, 5]), + np.array([6, 7, 8, 9]), + ] + + # Sanity check: compare the reference results to themselves. + assert_compatible_radius_results( + ref_dist, + ref_dist, + ref_indices, + ref_indices, + radius=7.0, + check_sorted=check_sorted, + **tols, + ) + + # Apply valid permutation on indices + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1, 2, 3, 4, 5])]), + np.array([np.array([1, 2, 4, 5, 3])]), + radius=7.0, + check_sorted=check_sorted, + **tols, + ) + assert_compatible_radius_results( + np.array([np.array([_1m, _1m, 1, _1p, _1p])]), + np.array([np.array([_1m, _1m, 1, _1p, _1p])]), + np.array([np.array([6, 7, 8, 9, 10])]), + np.array([np.array([6, 9, 7, 8, 10])]), + radius=7.0, + check_sorted=check_sorted, + **tols, + ) + + # Apply invalid permutation on indices + msg = re.escape( + "Query vector with index 0 lead to different distances for common neighbor with" + " index 1: dist_a=1.2 vs dist_b=2.5" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1, 2, 3, 4, 5])]), + np.array([np.array([2, 1, 3, 4, 5])]), + radius=7.0, + check_sorted=check_sorted, + **tols, + ) + + # Having extra last or missing elements is valid if they are in the + # tolerated rounding error range: [(1 - rtol) * radius - atol, radius] + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p, _6_1p])]), + np.array([np.array([1.2, 2.5, _6_1m, 6.1])]), + np.array([np.array([1, 2, 3, 4, 5, 7])]), + np.array([np.array([1, 2, 3, 6])]), + radius=_6_1p, + check_sorted=check_sorted, + **tols, + ) + + # Any discrepancy outside the tolerated rounding error range is invalid and + # indicates a missing neighbor in one of the result sets. 
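+ # Below, the neighbor at distance 6 (index 3) is well inside radius=6.1 minus
+ # the tolerance, so its absence from the second result set must be reported.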
+ msg = re.escape( + "Query vector with index 0 lead to mismatched result indices:\nneighbors in b" + " missing from a: []\nneighbors in a missing from b: [3]" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, 6])]), + np.array([np.array([1.2, 2.5])]), + np.array([np.array([1, 2, 3])]), + np.array([np.array([1, 2])]), + radius=6.1, + check_sorted=check_sorted, + **tols, + ) + msg = re.escape( + "Query vector with index 0 lead to mismatched result indices:\nneighbors in b" + " missing from a: [4]\nneighbors in a missing from b: [2]" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_radius_results( + np.array([np.array([1.2, 2.1, 2.5])]), + np.array([np.array([1.2, 2, 2.5])]), + np.array([np.array([1, 2, 3])]), + np.array([np.array([1, 4, 3])]), + radius=6.1, + check_sorted=check_sorted, + **tols, + ) + + # Radius upper bound is strictly checked + msg = re.escape( + "Largest returned distance 6.100000033333333 not within requested radius 6.1 on" + " row 0" + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1.2, 2.5, _6_1m, 6.1, 6.1])]), + np.array([np.array([1, 2, 3, 4, 5])]), + np.array([np.array([2, 1, 4, 5, 3])]), + radius=6.1, + check_sorted=check_sorted, + **tols, + ) + with pytest.raises(AssertionError, match=msg): + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, _6_1m, 6.1, 6.1])]), + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1, 2, 3, 4, 5])]), + np.array([np.array([2, 1, 4, 5, 3])]), + radius=6.1, + check_sorted=check_sorted, + **tols, + ) + + if check_sorted: + # Distances aren't properly sorted + msg = "Distances aren't sorted on row 0" + with pytest.raises(AssertionError, match=msg): + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1, 2, 3, 4, 5])]), + np.array([np.array([2, 1, 4, 5, 3])]), + radius=_6_1p, + check_sorted=True, + **tols, + ) + else: + assert_compatible_radius_results( + np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]), + np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])]), + np.array([np.array([1, 2, 3, 4, 5])]), + np.array([np.array([2, 1, 4, 5, 3])]), + radius=_6_1p, + check_sorted=False, + **tols, + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pairwise_distances_reduction_is_usable_for(csr_container): + rng = np.random.RandomState(0) + X = rng.rand(100, 10) + Y = rng.rand(100, 10) + X_csr = csr_container(X) + Y_csr = csr_container(Y) + metric = "manhattan" + + # Must be usable for all possible pair of {dense, sparse} datasets + assert BaseDistancesReductionDispatcher.is_usable_for(X, Y, metric) + assert BaseDistancesReductionDispatcher.is_usable_for(X_csr, Y_csr, metric) + assert BaseDistancesReductionDispatcher.is_usable_for(X_csr, Y, metric) + assert BaseDistancesReductionDispatcher.is_usable_for(X, Y_csr, metric) + + assert BaseDistancesReductionDispatcher.is_usable_for( + X.astype(np.float64), Y.astype(np.float64), metric + ) + + assert BaseDistancesReductionDispatcher.is_usable_for( + X.astype(np.float32), Y.astype(np.float32), metric + ) + + assert not BaseDistancesReductionDispatcher.is_usable_for( + X.astype(np.int64), Y.astype(np.int64), metric + ) + + assert not BaseDistancesReductionDispatcher.is_usable_for(X, Y, metric="pyfunc") + assert not 
BaseDistancesReductionDispatcher.is_usable_for( + X.astype(np.float32), Y, metric + ) + assert not BaseDistancesReductionDispatcher.is_usable_for( + X, Y.astype(np.int32), metric + ) + + # F-ordered arrays are not supported + assert not BaseDistancesReductionDispatcher.is_usable_for( + np.asfortranarray(X), Y, metric + ) + + assert BaseDistancesReductionDispatcher.is_usable_for(X_csr, Y, metric="euclidean") + assert BaseDistancesReductionDispatcher.is_usable_for( + X, Y_csr, metric="sqeuclidean" + ) + + # FIXME: the current Cython implementation is too slow for a large number of + # features. We temporarily disable it to fallback on SciPy's implementation. + # See: https://github.com/scikit-learn/scikit-learn/issues/28191 + assert not BaseDistancesReductionDispatcher.is_usable_for( + X_csr, Y_csr, metric="sqeuclidean" + ) + assert not BaseDistancesReductionDispatcher.is_usable_for( + X_csr, Y_csr, metric="euclidean" + ) + + # CSR matrices without non-zeros elements aren't currently supported + # TODO: support CSR matrices without non-zeros elements + X_csr_0_nnz = csr_container(X * 0) + assert not BaseDistancesReductionDispatcher.is_usable_for(X_csr_0_nnz, Y, metric) + + # CSR matrices with int64 indices and indptr (e.g. large nnz, or large n_features) + # aren't supported as of now. + # See: https://github.com/scikit-learn/scikit-learn/issues/23653 + # TODO: support CSR matrices with int64 indices and indptr + X_csr_int64 = csr_container(X) + X_csr_int64.indices = X_csr_int64.indices.astype(np.int64) + assert not BaseDistancesReductionDispatcher.is_usable_for(X_csr_int64, Y, metric) + + +def test_argkmin_factory_method_wrong_usages(): + rng = np.random.RandomState(1) + X = rng.rand(100, 10) + Y = rng.rand(100, 10) + k = 5 + metric = "euclidean" + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float32 and Y.dtype=float64" + ) + with pytest.raises(ValueError, match=msg): + ArgKmin.compute(X=X.astype(np.float32), Y=Y, k=k, metric=metric) + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float64 and Y.dtype=int32" + ) + with pytest.raises(ValueError, match=msg): + ArgKmin.compute(X=X, Y=Y.astype(np.int32), k=k, metric=metric) + + with pytest.raises(ValueError, match="k == -1, must be >= 1."): + ArgKmin.compute(X=X, Y=Y, k=-1, metric=metric) + + with pytest.raises(ValueError, match="k == 0, must be >= 1."): + ArgKmin.compute(X=X, Y=Y, k=0, metric=metric) + + with pytest.raises(ValueError, match="Unrecognized metric"): + ArgKmin.compute(X=X, Y=Y, k=k, metric="wrong metric") + + with pytest.raises( + ValueError, match=r"Buffer has wrong number of dimensions \(expected 2, got 1\)" + ): + ArgKmin.compute(X=np.array([1.0, 2.0]), Y=Y, k=k, metric=metric) + + with pytest.raises(ValueError, match="ndarray is not C-contiguous"): + ArgKmin.compute(X=np.asfortranarray(X), Y=Y, k=k, metric=metric) + + # A UserWarning must be raised in this case. + unused_metric_kwargs = {"p": 3} + + message = r"Some metric_kwargs have been passed \({'p': 3}\) but" + + with pytest.warns(UserWarning, match=message): + ArgKmin.compute( + X=X, Y=Y, k=k, metric=metric, metric_kwargs=unused_metric_kwargs + ) + + # A UserWarning must be raised in this case. 
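+ # Mixing the recognised 'Y_norm_squared' with an unused 'p' must still warn.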
+ metric_kwargs = { + "p": 3, # unused + "Y_norm_squared": sqeuclidean_row_norms(Y, num_threads=2), + } + + message = r"Some metric_kwargs have been passed \({'p': 3, 'Y_norm_squared'" + + with pytest.warns(UserWarning, match=message): + ArgKmin.compute(X=X, Y=Y, k=k, metric=metric, metric_kwargs=metric_kwargs) + + # No user warning must be raised in this case. + metric_kwargs = { + "X_norm_squared": sqeuclidean_row_norms(X, num_threads=2), + } + with warnings.catch_warnings(): + warnings.simplefilter("error", category=UserWarning) + ArgKmin.compute(X=X, Y=Y, k=k, metric=metric, metric_kwargs=metric_kwargs) + + # No user warning must be raised in this case. + metric_kwargs = { + "X_norm_squared": sqeuclidean_row_norms(X, num_threads=2), + "Y_norm_squared": sqeuclidean_row_norms(Y, num_threads=2), + } + with warnings.catch_warnings(): + warnings.simplefilter("error", category=UserWarning) + ArgKmin.compute(X=X, Y=Y, k=k, metric=metric, metric_kwargs=metric_kwargs) + + +def test_argkmin_classmode_factory_method_wrong_usages(): + rng = np.random.RandomState(1) + X = rng.rand(100, 10) + Y = rng.rand(100, 10) + k = 5 + metric = "manhattan" + + weights = "uniform" + Y_labels = rng.randint(low=0, high=10, size=100) + unique_Y_labels = np.unique(Y_labels) + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float32 and Y.dtype=float64" + ) + with pytest.raises(ValueError, match=msg): + ArgKminClassMode.compute( + X=X.astype(np.float32), + Y=Y, + k=k, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float64 and Y.dtype=int32" + ) + with pytest.raises(ValueError, match=msg): + ArgKminClassMode.compute( + X=X, + Y=Y.astype(np.int32), + k=k, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + with pytest.raises(ValueError, match="k == -1, must be >= 1."): + ArgKminClassMode.compute( + X=X, + Y=Y, + k=-1, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + with pytest.raises(ValueError, match="k == 0, must be >= 1."): + ArgKminClassMode.compute( + X=X, + Y=Y, + k=0, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + with pytest.raises(ValueError, match="Unrecognized metric"): + ArgKminClassMode.compute( + X=X, + Y=Y, + k=k, + metric="wrong metric", + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + with pytest.raises( + ValueError, match=r"Buffer has wrong number of dimensions \(expected 2, got 1\)" + ): + ArgKminClassMode.compute( + X=np.array([1.0, 2.0]), + Y=Y, + k=k, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + with pytest.raises(ValueError, match="ndarray is not C-contiguous"): + ArgKminClassMode.compute( + X=np.asfortranarray(X), + Y=Y, + k=k, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + non_existent_weights_strategy = "non_existent_weights_strategy" + message = ( + "Only the 'uniform' or 'distance' weights options are supported at this time. " + f"Got: weights='{non_existent_weights_strategy}'." 
+ ) + with pytest.raises(ValueError, match=message): + ArgKminClassMode.compute( + X=X, + Y=Y, + k=k, + metric=metric, + weights=non_existent_weights_strategy, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + ) + + # TODO: introduce assertions on UserWarnings once the Euclidean specialisation + # of ArgKminClassMode is supported. + + +def test_radius_neighbors_factory_method_wrong_usages(): + rng = np.random.RandomState(1) + X = rng.rand(100, 10) + Y = rng.rand(100, 10) + radius = 5 + metric = "euclidean" + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float32 and Y.dtype=float64" + ) + with pytest.raises( + ValueError, + match=msg, + ): + RadiusNeighbors.compute( + X=X.astype(np.float32), Y=Y, radius=radius, metric=metric + ) + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float64 and Y.dtype=int32" + ) + with pytest.raises( + ValueError, + match=msg, + ): + RadiusNeighbors.compute(X=X, Y=Y.astype(np.int32), radius=radius, metric=metric) + + with pytest.raises(ValueError, match="radius == -1.0, must be >= 0."): + RadiusNeighbors.compute(X=X, Y=Y, radius=-1, metric=metric) + + with pytest.raises(ValueError, match="Unrecognized metric"): + RadiusNeighbors.compute(X=X, Y=Y, radius=radius, metric="wrong metric") + + with pytest.raises( + ValueError, match=r"Buffer has wrong number of dimensions \(expected 2, got 1\)" + ): + RadiusNeighbors.compute( + X=np.array([1.0, 2.0]), Y=Y, radius=radius, metric=metric + ) + + with pytest.raises(ValueError, match="ndarray is not C-contiguous"): + RadiusNeighbors.compute( + X=np.asfortranarray(X), Y=Y, radius=radius, metric=metric + ) + + unused_metric_kwargs = {"p": 3} + + # A UserWarning must be raised in this case. + message = r"Some metric_kwargs have been passed \({'p': 3}\) but" + + with pytest.warns(UserWarning, match=message): + RadiusNeighbors.compute( + X=X, Y=Y, radius=radius, metric=metric, metric_kwargs=unused_metric_kwargs + ) + + # A UserWarning must be raised in this case. + metric_kwargs = { + "p": 3, # unused + "Y_norm_squared": sqeuclidean_row_norms(Y, num_threads=2), + } + + message = r"Some metric_kwargs have been passed \({'p': 3, 'Y_norm_squared'" + + with pytest.warns(UserWarning, match=message): + RadiusNeighbors.compute( + X=X, Y=Y, radius=radius, metric=metric, metric_kwargs=metric_kwargs + ) + + # No user warning must be raised in this case. + metric_kwargs = { + "X_norm_squared": sqeuclidean_row_norms(X, num_threads=2), + "Y_norm_squared": sqeuclidean_row_norms(Y, num_threads=2), + } + with warnings.catch_warnings(): + warnings.simplefilter("error", category=UserWarning) + RadiusNeighbors.compute( + X=X, Y=Y, radius=radius, metric=metric, metric_kwargs=metric_kwargs + ) + + # No user warning must be raised in this case. 
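+ # A lone 'X_norm_squared' is accepted silently, mirroring the ArgKmin case above.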
+ metric_kwargs = { + "X_norm_squared": sqeuclidean_row_norms(X, num_threads=2), + } + with warnings.catch_warnings(): + warnings.simplefilter("error", category=UserWarning) + RadiusNeighbors.compute( + X=X, Y=Y, radius=radius, metric=metric, metric_kwargs=metric_kwargs + ) + + +def test_radius_neighbors_classmode_factory_method_wrong_usages(): + rng = np.random.RandomState(1) + X = rng.rand(100, 10) + Y = rng.rand(100, 10) + radius = 5 + metric = "manhattan" + weights = "uniform" + Y_labels = rng.randint(low=0, high=10, size=100) + unique_Y_labels = np.unique(Y_labels) + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float32 and Y.dtype=float64" + ) + with pytest.raises(ValueError, match=msg): + RadiusNeighborsClassMode.compute( + X=X.astype(np.float32), + Y=Y, + radius=radius, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=None, + ) + + msg = ( + "Only float64 or float32 datasets pairs are supported at this time, " + "got: X.dtype=float64 and Y.dtype=int32" + ) + with pytest.raises(ValueError, match=msg): + RadiusNeighborsClassMode.compute( + X=X, + Y=Y.astype(np.int32), + radius=radius, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=None, + ) + + with pytest.raises(ValueError, match="radius == -1.0, must be >= 0."): + RadiusNeighborsClassMode.compute( + X=X, + Y=Y, + radius=-1, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=None, + ) + + with pytest.raises(ValueError, match="Unrecognized metric"): + RadiusNeighborsClassMode.compute( + X=X, + Y=Y, + radius=-1, + metric="wrong_metric", + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=None, + ) + + with pytest.raises( + ValueError, match=r"Buffer has wrong number of dimensions \(expected 2, got 1\)" + ): + RadiusNeighborsClassMode.compute( + X=np.array([1.0, 2.0]), + Y=Y, + radius=radius, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=None, + ) + + with pytest.raises(ValueError, match="ndarray is not C-contiguous"): + RadiusNeighborsClassMode.compute( + X=np.asfortranarray(X), + Y=Y, + radius=radius, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=None, + ) + + non_existent_weights_strategy = "non_existent_weights_strategy" + msg = ( + "Only the 'uniform' or 'distance' weights options are supported at this time. " + f"Got: weights='{non_existent_weights_strategy}'." 
+ ) + with pytest.raises(ValueError, match=msg): + RadiusNeighborsClassMode.compute( + X=X, + Y=Y, + radius=radius, + metric="wrong_metric", + weights=non_existent_weights_strategy, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=None, + ) + + +@pytest.mark.parametrize("Dispatcher", [ArgKmin, RadiusNeighbors]) +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +def test_chunk_size_agnosticism( + global_random_seed, + Dispatcher, + dtype, + n_features=100, +): + """Check that results do not depend on the chunk size.""" + rng = np.random.RandomState(global_random_seed) + spread = 100 + n_samples_X, n_samples_Y = rng.choice([97, 100, 101, 500], size=2, replace=False) + X = rng.rand(n_samples_X, n_features).astype(dtype) * spread + Y = rng.rand(n_samples_Y, n_features).astype(dtype) * spread + + if Dispatcher is ArgKmin: + parameter = 10 + check_parameters = {} + compute_parameters = {} + else: + radius = _non_trivial_radius(X=X, Y=Y, metric="euclidean") + parameter = radius + check_parameters = {"radius": radius} + compute_parameters = {"sort_results": True} + + ref_dist, ref_indices = Dispatcher.compute( + X, + Y, + parameter, + chunk_size=256, # default + metric="manhattan", + return_distance=True, + **compute_parameters, + ) + + dist, indices = Dispatcher.compute( + X, + Y, + parameter, + chunk_size=41, + metric="manhattan", + return_distance=True, + **compute_parameters, + ) + + ASSERT_RESULT[(Dispatcher, dtype)]( + ref_dist, dist, ref_indices, indices, **check_parameters + ) + + +@pytest.mark.parametrize("Dispatcher", [ArgKmin, RadiusNeighbors]) +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +def test_n_threads_agnosticism( + global_random_seed, + Dispatcher, + dtype, + n_features=100, +): + """Check that results do not depend on the number of threads.""" + rng = np.random.RandomState(global_random_seed) + n_samples_X, n_samples_Y = rng.choice([97, 100, 101, 500], size=2, replace=False) + spread = 100 + X = rng.rand(n_samples_X, n_features).astype(dtype) * spread + Y = rng.rand(n_samples_Y, n_features).astype(dtype) * spread + + if Dispatcher is ArgKmin: + parameter = 10 + check_parameters = {} + compute_parameters = {} + else: + radius = _non_trivial_radius(X=X, Y=Y, metric="euclidean") + parameter = radius + check_parameters = {"radius": radius} + compute_parameters = {"sort_results": True} + + ref_dist, ref_indices = Dispatcher.compute( + X, + Y, + parameter, + chunk_size=25, # make sure we use multiple threads + return_distance=True, + **compute_parameters, + ) + + with threadpoolctl.threadpool_limits(limits=1, user_api="openmp"): + dist, indices = Dispatcher.compute( + X, + Y, + parameter, + chunk_size=25, + return_distance=True, + **compute_parameters, + ) + + ASSERT_RESULT[(Dispatcher, dtype)]( + ref_dist, dist, ref_indices, indices, **check_parameters + ) + + +@pytest.mark.parametrize( + "Dispatcher, dtype", + [ + (ArgKmin, np.float64), + (RadiusNeighbors, np.float32), + (ArgKmin, np.float32), + (RadiusNeighbors, np.float64), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_format_agnosticism( + global_random_seed, + Dispatcher, + dtype, + csr_container, +): + """Check that results do not depend on the format (dense, sparse) of the input.""" + rng = np.random.RandomState(global_random_seed) + spread = 100 + n_samples, n_features = 100, 100 + + X = rng.rand(n_samples, n_features).astype(dtype) * spread + Y = rng.rand(n_samples, n_features).astype(dtype) * spread + + X_csr = csr_container(X) + Y_csr = 
csr_container(Y) + + if Dispatcher is ArgKmin: + parameter = 10 + check_parameters = {} + compute_parameters = {} + else: + # Adjusting the radius to ensure that the expected results is neither + # trivially empty nor too large. + radius = _non_trivial_radius(X=X, Y=Y, metric="euclidean") + parameter = radius + check_parameters = {"radius": radius} + compute_parameters = {"sort_results": True} + + dist_dense, indices_dense = Dispatcher.compute( + X, + Y, + parameter, + chunk_size=50, + return_distance=True, + **compute_parameters, + ) + + for _X, _Y in itertools.product((X, X_csr), (Y, Y_csr)): + if _X is X and _Y is Y: + continue + dist, indices = Dispatcher.compute( + _X, + _Y, + parameter, + chunk_size=50, + return_distance=True, + **compute_parameters, + ) + ASSERT_RESULT[(Dispatcher, dtype)]( + dist_dense, + dist, + indices_dense, + indices, + **check_parameters, + ) + + +@pytest.mark.parametrize("Dispatcher", [ArgKmin, RadiusNeighbors]) +def test_strategies_consistency( + global_random_seed, + global_dtype, + Dispatcher, + n_features=10, +): + """Check that the results do not depend on the strategy used.""" + rng = np.random.RandomState(global_random_seed) + metric = rng.choice( + np.array( + [ + "euclidean", + "minkowski", + "manhattan", + "haversine", + ], + dtype=object, + ) + ) + n_samples_X, n_samples_Y = rng.choice([97, 100, 101, 500], size=2, replace=False) + spread = 100 + X = rng.rand(n_samples_X, n_features).astype(global_dtype) * spread + Y = rng.rand(n_samples_Y, n_features).astype(global_dtype) * spread + + # Haversine distance only accepts 2D data + if metric == "haversine": + X = np.ascontiguousarray(X[:, :2]) + Y = np.ascontiguousarray(Y[:, :2]) + + if Dispatcher is ArgKmin: + parameter = 10 + check_parameters = {} + compute_parameters = {} + else: + radius = _non_trivial_radius(X=X, Y=Y, metric=metric) + parameter = radius + check_parameters = {"radius": radius} + compute_parameters = {"sort_results": True} + + dist_par_X, indices_par_X = Dispatcher.compute( + X, + Y, + parameter, + metric=metric, + # Taking the first + metric_kwargs=_get_metric_params_list( + metric, n_features, seed=global_random_seed + )[0], + # To be sure to use parallelization + chunk_size=n_samples_X // 4, + strategy="parallel_on_X", + return_distance=True, + **compute_parameters, + ) + + dist_par_Y, indices_par_Y = Dispatcher.compute( + X, + Y, + parameter, + metric=metric, + # Taking the first + metric_kwargs=_get_metric_params_list( + metric, n_features, seed=global_random_seed + )[0], + # To be sure to use parallelization + chunk_size=n_samples_Y // 4, + strategy="parallel_on_Y", + return_distance=True, + **compute_parameters, + ) + + ASSERT_RESULT[(Dispatcher, global_dtype)]( + dist_par_X, dist_par_Y, indices_par_X, indices_par_Y, **check_parameters + ) + + +# "Concrete Dispatchers"-specific tests + + +@pytest.mark.parametrize("metric", CDIST_PAIRWISE_DISTANCES_REDUCTION_COMMON_METRICS) +@pytest.mark.parametrize("strategy", ("parallel_on_X", "parallel_on_Y")) +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pairwise_distances_argkmin( + global_random_seed, + metric, + strategy, + dtype, + csr_container, + n_queries=5, + n_samples=100, + k=10, +): + rng = np.random.RandomState(global_random_seed) + n_features = rng.choice([50, 500]) + translation = rng.choice([0, 1e6]) + spread = 1000 + X = translation + rng.rand(n_queries, n_features).astype(dtype) * spread + Y = translation + rng.rand(n_samples, 
n_features).astype(dtype) * spread + + X_csr = csr_container(X) + Y_csr = csr_container(Y) + + # Haversine distance only accepts 2D data + if metric == "haversine": + X = np.ascontiguousarray(X[:, :2]) + Y = np.ascontiguousarray(Y[:, :2]) + + metric_kwargs = _get_metric_params_list(metric, n_features)[0] + + # Reference for argkmin results + if metric == "euclidean": + # Compare to scikit-learn GEMM optimized implementation + dist_matrix = euclidean_distances(X, Y) + else: + dist_matrix = cdist(X, Y, metric=metric, **metric_kwargs) + # Taking argkmin (indices of the k smallest values) + argkmin_indices_ref = np.argsort(dist_matrix, axis=1)[:, :k] + # Getting the associated distances + argkmin_distances_ref = np.zeros(argkmin_indices_ref.shape, dtype=np.float64) + for row_idx in range(argkmin_indices_ref.shape[0]): + argkmin_distances_ref[row_idx] = dist_matrix[ + row_idx, argkmin_indices_ref[row_idx] + ] + + for _X, _Y in itertools.product((X, X_csr), (Y, Y_csr)): + argkmin_distances, argkmin_indices = ArgKmin.compute( + _X, + _Y, + k, + metric=metric, + metric_kwargs=metric_kwargs, + return_distance=True, + # So as to have more than a chunk, forcing parallelism. + chunk_size=n_samples // 4, + strategy=strategy, + ) + + ASSERT_RESULT[(ArgKmin, dtype)]( + argkmin_distances, + argkmin_distances_ref, + argkmin_indices, + argkmin_indices_ref, + ) + + +@pytest.mark.parametrize("metric", CDIST_PAIRWISE_DISTANCES_REDUCTION_COMMON_METRICS) +@pytest.mark.parametrize("strategy", ("parallel_on_X", "parallel_on_Y")) +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +def test_pairwise_distances_radius_neighbors( + global_random_seed, + metric, + strategy, + dtype, + n_queries=5, + n_samples=100, +): + rng = np.random.RandomState(global_random_seed) + n_features = rng.choice([50, 500]) + translation = rng.choice([0, 1e6]) + spread = 1000 + X = translation + rng.rand(n_queries, n_features).astype(dtype) * spread + Y = translation + rng.rand(n_samples, n_features).astype(dtype) * spread + + metric_kwargs = _get_metric_params_list( + metric, n_features, seed=global_random_seed + )[0] + + # Reference for argkmin results + if metric == "euclidean": + # Compare to scikit-learn GEMM optimized implementation + dist_matrix = euclidean_distances(X, Y) + else: + dist_matrix = cdist(X, Y, metric=metric, **metric_kwargs) + + radius = _non_trivial_radius(precomputed_dists=dist_matrix) + + # Getting the neighbors for a given radius + neigh_indices_ref = [] + neigh_distances_ref = [] + + for row in dist_matrix: + ind = np.arange(row.shape[0])[row <= radius] + dist = row[ind] + + sort = np.argsort(dist) + ind, dist = ind[sort], dist[sort] + + neigh_indices_ref.append(ind) + neigh_distances_ref.append(dist) + + neigh_distances, neigh_indices = RadiusNeighbors.compute( + X, + Y, + radius, + metric=metric, + metric_kwargs=metric_kwargs, + return_distance=True, + # So as to have more than a chunk, forcing parallelism. 
+ chunk_size=n_samples // 4, + strategy=strategy, + sort_results=True, + ) + + ASSERT_RESULT[(RadiusNeighbors, dtype)]( + neigh_distances, neigh_distances_ref, neigh_indices, neigh_indices_ref, radius + ) + + +@pytest.mark.parametrize("Dispatcher", [ArgKmin, RadiusNeighbors]) +@pytest.mark.parametrize("metric", ["manhattan", "euclidean"]) +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +def test_memmap_backed_data( + metric, + Dispatcher, + dtype, +): + """Check that the results do not depend on the datasets writability.""" + rng = np.random.RandomState(0) + spread = 100 + n_samples, n_features = 128, 10 + X = rng.rand(n_samples, n_features).astype(dtype) * spread + Y = rng.rand(n_samples, n_features).astype(dtype) * spread + + # Create read only datasets + X_mm, Y_mm = create_memmap_backed_data([X, Y]) + + if Dispatcher is ArgKmin: + parameter = 10 + check_parameters = {} + compute_parameters = {} + else: + # Scaling the radius slightly with the numbers of dimensions + radius = 10 ** np.log(n_features) + parameter = radius + check_parameters = {"radius": radius} + compute_parameters = {"sort_results": True} + + ref_dist, ref_indices = Dispatcher.compute( + X, + Y, + parameter, + metric=metric, + return_distance=True, + **compute_parameters, + ) + + dist_mm, indices_mm = Dispatcher.compute( + X_mm, + Y_mm, + parameter, + metric=metric, + return_distance=True, + **compute_parameters, + ) + + ASSERT_RESULT[(Dispatcher, dtype)]( + ref_dist, dist_mm, ref_indices, indices_mm, **check_parameters + ) + + +@pytest.mark.parametrize("dtype", [np.float64, np.float32]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sqeuclidean_row_norms( + global_random_seed, + dtype, + csr_container, +): + rng = np.random.RandomState(global_random_seed) + spread = 100 + n_samples = rng.choice([97, 100, 101, 1000]) + n_features = rng.choice([5, 10, 100]) + num_threads = rng.choice([1, 2, 8]) + X = rng.rand(n_samples, n_features).astype(dtype) * spread + + X_csr = csr_container(X) + + sq_row_norm_reference = np.linalg.norm(X, axis=1) ** 2 + sq_row_norm = sqeuclidean_row_norms(X, num_threads=num_threads) + + sq_row_norm_csr = sqeuclidean_row_norms(X_csr, num_threads=num_threads) + + assert_allclose(sq_row_norm_reference, sq_row_norm) + assert_allclose(sq_row_norm_reference, sq_row_norm_csr) + + with pytest.raises(ValueError): + X = np.asfortranarray(X) + sqeuclidean_row_norms(X, num_threads=num_threads) + + +def test_argkmin_classmode_strategy_consistent(): + rng = np.random.RandomState(1) + X = rng.rand(100, 10) + Y = rng.rand(100, 10) + k = 5 + metric = "manhattan" + + weights = "uniform" + Y_labels = rng.randint(low=0, high=10, size=100) + unique_Y_labels = np.unique(Y_labels) + results_X = ArgKminClassMode.compute( + X=X, + Y=Y, + k=k, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + strategy="parallel_on_X", + ) + results_Y = ArgKminClassMode.compute( + X=X, + Y=Y, + k=k, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + strategy="parallel_on_Y", + ) + assert_array_equal(results_X, results_Y) + + +@pytest.mark.parametrize("outlier_label", [None, 0, 3, 6, 9]) +def test_radius_neighbors_classmode_strategy_consistent(outlier_label): + rng = np.random.RandomState(1) + X = rng.rand(100, 10) + Y = rng.rand(100, 10) + radius = 5 + metric = "manhattan" + + weights = "uniform" + Y_labels = rng.randint(low=0, high=10, size=100) + unique_Y_labels = np.unique(Y_labels) + results_X = 
RadiusNeighborsClassMode.compute( + X=X, + Y=Y, + radius=radius, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=outlier_label, + strategy="parallel_on_X", + ) + results_Y = RadiusNeighborsClassMode.compute( + X=X, + Y=Y, + radius=radius, + metric=metric, + weights=weights, + Y_labels=Y_labels, + unique_Y_labels=unique_Y_labels, + outlier_label=outlier_label, + strategy="parallel_on_Y", + ) + assert_allclose(results_X, results_Y) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_ranking.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_ranking.py new file mode 100644 index 0000000000000000000000000000000000000000..d49d96e1d82d7a64949a178f9813951976d66014 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_ranking.py @@ -0,0 +1,2244 @@ +import re +import warnings + +import numpy as np +import pytest +from scipy import stats + +from sklearn import datasets, svm +from sklearn.datasets import make_multilabel_classification +from sklearn.exceptions import UndefinedMetricWarning +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import ( + accuracy_score, + auc, + average_precision_score, + coverage_error, + dcg_score, + det_curve, + label_ranking_average_precision_score, + label_ranking_loss, + ndcg_score, + precision_recall_curve, + roc_auc_score, + roc_curve, + top_k_accuracy_score, +) +from sklearn.metrics._ranking import _dcg_sample_scores, _ndcg_sample_scores +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import label_binarize +from sklearn.random_projection import _sparse_random_matrix +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.extmath import softmax +from sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import ( + check_array, + check_consistent_length, + check_random_state, +) + +############################################################################### +# Utilities for testing + +CURVE_FUNCS = [ + det_curve, + precision_recall_curve, + roc_curve, +] + + +def make_prediction(dataset=None, binary=False): + """Make some classification predictions on a toy dataset using a SVC + + If binary is True restrict to a binary classification problem instead of a + multiclass classification problem + """ + + if dataset is None: + # import some data to play with + dataset = datasets.load_iris() + + X = dataset.data + y = dataset.target + + if binary: + # restrict to a binary classification task + X, y = X[y < 2], y[y < 2] + + n_samples, n_features = X.shape + p = np.arange(n_samples) + + rng = check_random_state(37) + rng.shuffle(p) + X, y = X[p], y[p] + half = int(n_samples / 2) + + # add noisy features to make the problem harder and avoid perfect results + rng = np.random.RandomState(0) + X = np.c_[X, rng.randn(n_samples, 200 * n_features)] + + # run classifier, get class probabilities and label predictions + clf = svm.SVC(kernel="linear", probability=True, random_state=0) + y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) + + if binary: + # only interested in probabilities of the positive case + # XXX: do we really want a special API for the binary case? 
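+ # predict_proba returns an (n_samples, 2) array for binary problems;
+ # keep only the positive-class column.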
+ y_score = y_score[:, 1] + + y_pred = clf.predict(X[half:]) + y_true = y[half:] + return y_true, y_pred, y_score + + +############################################################################### +# Tests + + +def _auc(y_true, y_score): + """Alternative implementation to check for correctness of + `roc_auc_score`.""" + pos_label = np.unique(y_true)[1] + + # Count the number of times positive samples are correctly ranked above + # negative samples. + pos = y_score[y_true == pos_label] + neg = y_score[y_true != pos_label] + diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1) + n_correct = np.sum(diff_matrix > 0) + + return n_correct / float(len(pos) * len(neg)) + + +def _average_precision(y_true, y_score): + """Alternative implementation to check for correctness of + `average_precision_score`. + + Note that this implementation fails on some edge cases. + For example, for constant predictions e.g. [0.5, 0.5, 0.5], + y_true = [1, 0, 0] returns an average precision of 0.33... + but y_true = [0, 0, 1] returns 1.0. + """ + pos_label = np.unique(y_true)[1] + n_pos = np.sum(y_true == pos_label) + order = np.argsort(y_score)[::-1] + y_score = y_score[order] + y_true = y_true[order] + + score = 0 + for i in range(len(y_score)): + if y_true[i] == pos_label: + # Compute precision up to document i + # i.e, percentage of relevant documents up to document i. + prec = 0 + for j in range(0, i + 1): + if y_true[j] == pos_label: + prec += 1.0 + prec /= i + 1.0 + score += prec + + return score / n_pos + + +def _average_precision_slow(y_true, y_score): + """A second alternative implementation of average precision that closely + follows the Wikipedia article's definition (see References). This should + give identical results as `average_precision_score` for all inputs. + + References + ---------- + .. [1] `Wikipedia entry for the Average precision + `_ + """ + precision, recall, threshold = precision_recall_curve(y_true, y_score) + precision = list(reversed(precision)) + recall = list(reversed(recall)) + average_precision = 0 + for i in range(1, len(precision)): + average_precision += precision[i] * (recall[i] - recall[i - 1]) + return average_precision + + +def _partial_roc_auc_score(y_true, y_predict, max_fpr): + """Alternative implementation to check for correctness of `roc_auc_score` + with `max_fpr` set. 
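+
+ The partial AUC over [0, max_fpr] is standardized using formula (5) from
+ McClish (1989), as implemented below.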
+ """ + + def _partial_roc(y_true, y_predict, max_fpr): + fpr, tpr, _ = roc_curve(y_true, y_predict) + new_fpr = fpr[fpr <= max_fpr] + new_fpr = np.append(new_fpr, max_fpr) + new_tpr = tpr[fpr <= max_fpr] + idx_out = np.argmax(fpr > max_fpr) + idx_in = idx_out - 1 + x_interp = [fpr[idx_in], fpr[idx_out]] + y_interp = [tpr[idx_in], tpr[idx_out]] + new_tpr = np.append(new_tpr, np.interp(max_fpr, x_interp, y_interp)) + return (new_fpr, new_tpr) + + new_fpr, new_tpr = _partial_roc(y_true, y_predict, max_fpr) + partial_auc = auc(new_fpr, new_tpr) + + # Formula (5) from McClish 1989 + fpr1 = 0 + fpr2 = max_fpr + min_area = 0.5 * (fpr2 - fpr1) * (fpr2 + fpr1) + max_area = fpr2 - fpr1 + return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)) + + +@pytest.mark.parametrize("drop", [True, False]) +def test_roc_curve(drop): + # Test Area under Receiver Operating Characteristic (ROC) curve + y_true, _, y_score = make_prediction(binary=True) + expected_auc = _auc(y_true, y_score) + + fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=drop) + roc_auc = auc(fpr, tpr) + assert_array_almost_equal(roc_auc, expected_auc, decimal=2) + assert_almost_equal(roc_auc, roc_auc_score(y_true, y_score)) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + +def test_roc_curve_end_points(): + # Make sure that roc_curve returns a curve start at 0 and ending and + # 1 even in corner cases + rng = np.random.RandomState(0) + y_true = np.array([0] * 50 + [1] * 50) + y_pred = rng.randint(3, size=100) + fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True) + assert fpr[0] == 0 + assert fpr[-1] == 1 + assert fpr.shape == tpr.shape + assert fpr.shape == thr.shape + + +def test_roc_returns_consistency(): + # Test whether the returned threshold matches up with tpr + # make small toy dataset + y_true, _, y_score = make_prediction(binary=True) + fpr, tpr, thresholds = roc_curve(y_true, y_score) + + # use the given thresholds to determine the tpr + tpr_correct = [] + for t in thresholds: + tp = np.sum((y_score >= t) & y_true) + p = np.sum(y_true) + tpr_correct.append(1.0 * tp / p) + + # compare tpr and tpr_correct to see if the thresholds' order was correct + assert_array_almost_equal(tpr, tpr_correct, decimal=2) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + +def test_roc_curve_multi(): + # roc_curve not applicable for multi-class problems + y_true, _, y_score = make_prediction(binary=False) + + with pytest.raises(ValueError): + roc_curve(y_true, y_score) + + +def test_roc_curve_confidence(): + # roc_curve for confidence scores + y_true, _, y_score = make_prediction(binary=True) + + fpr, tpr, thresholds = roc_curve(y_true, y_score - 0.5) + roc_auc = auc(fpr, tpr) + assert_array_almost_equal(roc_auc, 0.90, decimal=2) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + +def test_roc_curve_hard(): + # roc_curve for hard decisions + y_true, pred, y_score = make_prediction(binary=True) + + # always predict one + trivial_pred = np.ones(y_true.shape) + fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) + roc_auc = auc(fpr, tpr) + assert_array_almost_equal(roc_auc, 0.50, decimal=2) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + # always predict zero + trivial_pred = np.zeros(y_true.shape) + fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) + roc_auc = auc(fpr, tpr) + assert_array_almost_equal(roc_auc, 0.50, decimal=2) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + # 
hard decisions + fpr, tpr, thresholds = roc_curve(y_true, pred) + roc_auc = auc(fpr, tpr) + assert_array_almost_equal(roc_auc, 0.78, decimal=2) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + +def test_roc_curve_one_label(): + y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1] + # assert there are warnings + expected_message = ( + "No negative samples in y_true, false positive value should be meaningless" + ) + with pytest.warns(UndefinedMetricWarning, match=expected_message): + fpr, tpr, thresholds = roc_curve(y_true, y_pred) + + # all true labels, all fpr should be nan + assert_array_equal(fpr, np.full(len(thresholds), np.nan)) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + # assert there are warnings + expected_message = ( + "No positive samples in y_true, true positive value should be meaningless" + ) + with pytest.warns(UndefinedMetricWarning, match=expected_message): + fpr, tpr, thresholds = roc_curve([1 - x for x in y_true], y_pred) + # all negative labels, all tpr should be nan + assert_array_equal(tpr, np.full(len(thresholds), np.nan)) + assert fpr.shape == tpr.shape + assert fpr.shape == thresholds.shape + + +def test_roc_curve_toydata(): + # Binary classification + y_true = [0, 1] + y_score = [0, 1] + tpr, fpr, _ = roc_curve(y_true, y_score) + roc_auc = roc_auc_score(y_true, y_score) + assert_array_almost_equal(tpr, [0, 0, 1]) + assert_array_almost_equal(fpr, [0, 1, 1]) + assert_almost_equal(roc_auc, 1.0) + + y_true = [0, 1] + y_score = [1, 0] + tpr, fpr, _ = roc_curve(y_true, y_score) + roc_auc = roc_auc_score(y_true, y_score) + assert_array_almost_equal(tpr, [0, 1, 1]) + assert_array_almost_equal(fpr, [0, 0, 1]) + assert_almost_equal(roc_auc, 0.0) + + y_true = [1, 0] + y_score = [1, 1] + tpr, fpr, _ = roc_curve(y_true, y_score) + roc_auc = roc_auc_score(y_true, y_score) + assert_array_almost_equal(tpr, [0, 1]) + assert_array_almost_equal(fpr, [0, 1]) + assert_almost_equal(roc_auc, 0.5) + + y_true = [1, 0] + y_score = [1, 0] + tpr, fpr, _ = roc_curve(y_true, y_score) + roc_auc = roc_auc_score(y_true, y_score) + assert_array_almost_equal(tpr, [0, 0, 1]) + assert_array_almost_equal(fpr, [0, 1, 1]) + assert_almost_equal(roc_auc, 1.0) + + y_true = [1, 0] + y_score = [0.5, 0.5] + tpr, fpr, _ = roc_curve(y_true, y_score) + roc_auc = roc_auc_score(y_true, y_score) + assert_array_almost_equal(tpr, [0, 1]) + assert_array_almost_equal(fpr, [0, 1]) + assert_almost_equal(roc_auc, 0.5) + + y_true = [0, 0] + y_score = [0.25, 0.75] + # assert UndefinedMetricWarning because of no positive sample in y_true + expected_message = ( + "No positive samples in y_true, true positive value should be meaningless" + ) + with pytest.warns(UndefinedMetricWarning, match=expected_message): + tpr, fpr, _ = roc_curve(y_true, y_score) + + with pytest.raises(ValueError): + roc_auc_score(y_true, y_score) + assert_array_almost_equal(tpr, [0.0, 0.5, 1.0]) + assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan]) + + y_true = [1, 1] + y_score = [0.25, 0.75] + # assert UndefinedMetricWarning because of no negative sample in y_true + expected_message = ( + "No negative samples in y_true, false positive value should be meaningless" + ) + with pytest.warns(UndefinedMetricWarning, match=expected_message): + tpr, fpr, _ = roc_curve(y_true, y_score) + + with pytest.raises(ValueError): + roc_auc_score(y_true, y_score) + assert_array_almost_equal(tpr, [np.nan, np.nan, np.nan]) + assert_array_almost_equal(fpr, [0.0, 0.5, 1.0]) + + # 
Multi-label classification task + y_true = np.array([[0, 1], [0, 1]]) + y_score = np.array([[0, 1], [0, 1]]) + with pytest.raises(ValueError): + roc_auc_score(y_true, y_score, average="macro") + with pytest.raises(ValueError): + roc_auc_score(y_true, y_score, average="weighted") + assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.0) + assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.0) + + y_true = np.array([[0, 1], [0, 1]]) + y_score = np.array([[0, 1], [1, 0]]) + with pytest.raises(ValueError): + roc_auc_score(y_true, y_score, average="macro") + with pytest.raises(ValueError): + roc_auc_score(y_true, y_score, average="weighted") + assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5) + assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5) + + y_true = np.array([[1, 0], [0, 1]]) + y_score = np.array([[0, 1], [1, 0]]) + assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0) + assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0) + assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0) + assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0) + + y_true = np.array([[1, 0], [0, 1]]) + y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) + assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0.5) + assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0.5) + assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5) + assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5) + + +def test_roc_curve_drop_intermediate(): + # Test that drop_intermediate drops the correct thresholds + y_true = [0, 0, 0, 0, 1, 1] + y_score = [0.0, 0.2, 0.5, 0.6, 0.7, 1.0] + tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) + assert_array_almost_equal(thresholds, [np.inf, 1.0, 0.7, 0.0]) + + # Test dropping thresholds with repeating scores + y_true = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] + y_score = [0.0, 0.1, 0.6, 0.6, 0.7, 0.8, 0.9, 0.6, 0.7, 0.8, 0.9, 0.9, 1.0] + tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) + assert_array_almost_equal(thresholds, [np.inf, 1.0, 0.9, 0.7, 0.6, 0.0]) + + +def test_roc_curve_fpr_tpr_increasing(): + # Ensure that fpr and tpr returned by roc_curve are increasing. + # Construct an edge case with float y_score and sample_weight + # when some adjacent values of fpr and tpr are actually the same. 
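+
+ # --- Editorial illustration (not part of upstream scikit-learn; the _demo
+ # names are local to this sketch): the same non-decreasing property on a
+ # plain, unweighted toy curve, before the weighted edge case constructed below.
+ _fpr_demo, _tpr_demo, _ = roc_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
+ assert np.all(np.diff(_fpr_demo) >= 0)
+ assert np.all(np.diff(_tpr_demo) >= 0)
+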
+ y_true = [0, 0, 1, 1, 1] + y_score = [0.1, 0.7, 0.3, 0.4, 0.5] + sample_weight = np.repeat(0.2, 5) + fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight) + assert (np.diff(fpr) < 0).sum() == 0 + assert (np.diff(tpr) < 0).sum() == 0 + + +def test_auc(): + # Test Area Under Curve (AUC) computation + x = [0, 1] + y = [0, 1] + assert_array_almost_equal(auc(x, y), 0.5) + x = [1, 0] + y = [0, 1] + assert_array_almost_equal(auc(x, y), 0.5) + x = [1, 0, 0] + y = [0, 1, 1] + assert_array_almost_equal(auc(x, y), 0.5) + x = [0, 1] + y = [1, 1] + assert_array_almost_equal(auc(x, y), 1) + x = [0, 0.5, 1] + y = [0, 0.5, 1] + assert_array_almost_equal(auc(x, y), 0.5) + + +def test_auc_errors(): + # Incompatible shapes + with pytest.raises(ValueError): + auc([0.0, 0.5, 1.0], [0.1, 0.2]) + + # Too few x values + with pytest.raises(ValueError): + auc([0.0], [0.1]) + + # x is not in order + x = [2, 1, 3, 4] + y = [5, 6, 7, 8] + error_message = "x is neither increasing nor decreasing : {}".format(np.array(x)) + with pytest.raises(ValueError, match=re.escape(error_message)): + auc(x, y) + + +@pytest.mark.parametrize( + "y_true, labels", + [ + (np.array([0, 1, 0, 2]), [0, 1, 2]), + (np.array([0, 1, 0, 2]), None), + (["a", "b", "a", "c"], ["a", "b", "c"]), + (["a", "b", "a", "c"], None), + ], +) +def test_multiclass_ovo_roc_auc_toydata(y_true, labels): + # Tests the one-vs-one multiclass ROC AUC algorithm + # on a small example, representative of an expected use case. + y_scores = np.array( + [[0.1, 0.8, 0.1], [0.3, 0.4, 0.3], [0.35, 0.5, 0.15], [0, 0.2, 0.8]] + ) + + # Used to compute the expected output. + # Consider labels 0 and 1: + # positive label is 0, negative label is 1 + score_01 = roc_auc_score([1, 0, 1], [0.1, 0.3, 0.35]) + # positive label is 1, negative label is 0 + score_10 = roc_auc_score([0, 1, 0], [0.8, 0.4, 0.5]) + average_score_01 = (score_01 + score_10) / 2 + + # Consider labels 0 and 2: + score_02 = roc_auc_score([1, 1, 0], [0.1, 0.35, 0]) + score_20 = roc_auc_score([0, 0, 1], [0.1, 0.15, 0.8]) + average_score_02 = (score_02 + score_20) / 2 + + # Consider labels 1 and 2: + score_12 = roc_auc_score([1, 0], [0.4, 0.2]) + score_21 = roc_auc_score([0, 1], [0.3, 0.8]) + average_score_12 = (score_12 + score_21) / 2 + + # Unweighted, one-vs-one multiclass ROC AUC algorithm + ovo_unweighted_score = (average_score_01 + average_score_02 + average_score_12) / 3 + assert_almost_equal( + roc_auc_score(y_true, y_scores, labels=labels, multi_class="ovo"), + ovo_unweighted_score, + ) + + # Weighted, one-vs-one multiclass ROC AUC algorithm + # Each term is weighted by the prevalence for the positive label. + pair_scores = [average_score_01, average_score_02, average_score_12] + prevalence = [0.75, 0.75, 0.50] + ovo_weighted_score = np.average(pair_scores, weights=prevalence) + assert_almost_equal( + roc_auc_score( + y_true, y_scores, labels=labels, multi_class="ovo", average="weighted" + ), + ovo_weighted_score, + ) + + # Check that average=None raises NotImplemented error + error_message = "average=None is not implemented for multi_class='ovo'." 
+ with pytest.raises(NotImplementedError, match=error_message): + roc_auc_score(y_true, y_scores, labels=labels, multi_class="ovo", average=None) + + +@pytest.mark.parametrize( + "y_true, labels", + [ + (np.array([0, 2, 0, 2]), [0, 1, 2]), + (np.array(["a", "d", "a", "d"]), ["a", "b", "d"]), + ], +) +def test_multiclass_ovo_roc_auc_toydata_binary(y_true, labels): + # Tests the one-vs-one multiclass ROC AUC algorithm for binary y_true + # + # on a small example, representative of an expected use case. + y_scores = np.array( + [[0.2, 0.0, 0.8], [0.6, 0.0, 0.4], [0.55, 0.0, 0.45], [0.4, 0.0, 0.6]] + ) + + # Used to compute the expected output. + # Consider labels 0 and 1: + # positive label is 0, negative label is 1 + score_01 = roc_auc_score([1, 0, 1, 0], [0.2, 0.6, 0.55, 0.4]) + # positive label is 1, negative label is 0 + score_10 = roc_auc_score([0, 1, 0, 1], [0.8, 0.4, 0.45, 0.6]) + ovo_score = (score_01 + score_10) / 2 + + assert_almost_equal( + roc_auc_score(y_true, y_scores, labels=labels, multi_class="ovo"), ovo_score + ) + + # Weighted, one-vs-one multiclass ROC AUC algorithm + assert_almost_equal( + roc_auc_score( + y_true, y_scores, labels=labels, multi_class="ovo", average="weighted" + ), + ovo_score, + ) + + +@pytest.mark.parametrize( + "y_true, labels", + [ + (np.array([0, 1, 2, 2]), None), + (["a", "b", "c", "c"], None), + ([0, 1, 2, 2], [0, 1, 2]), + (["a", "b", "c", "c"], ["a", "b", "c"]), + ], +) +def test_multiclass_ovr_roc_auc_toydata(y_true, labels): + # Tests the unweighted, one-vs-rest multiclass ROC AUC algorithm + # on a small example, representative of an expected use case. + y_scores = np.array( + [[1.0, 0.0, 0.0], [0.1, 0.5, 0.4], [0.1, 0.1, 0.8], [0.3, 0.3, 0.4]] + ) + # Compute the expected result by individually computing the 'one-vs-rest' + # ROC AUC scores for classes 0, 1, and 2. 
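+
+ # --- Editorial illustration (not part of upstream scikit-learn; the _demo
+ # names are local to this sketch): the expected value can also be written
+ # generically with label_binarize, of which the hand-coded one-vs-rest
+ # indicator vectors below are the unrolled columns.
+ _y_demo = [0, 1, 2, 2]
+ _indicator = label_binarize(_y_demo, classes=[0, 1, 2])
+ _per_class_auc = [roc_auc_score(_indicator[:, k], y_scores[:, k]) for k in range(3)]
+ assert roc_auc_score(_y_demo, y_scores, multi_class="ovr") == pytest.approx(
+ np.mean(_per_class_auc)
+ )
+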
+ out_0 = roc_auc_score([1, 0, 0, 0], y_scores[:, 0]) + out_1 = roc_auc_score([0, 1, 0, 0], y_scores[:, 1]) + out_2 = roc_auc_score([0, 0, 1, 1], y_scores[:, 2]) + assert_almost_equal( + roc_auc_score(y_true, y_scores, multi_class="ovr", labels=labels, average=None), + [out_0, out_1, out_2], + ) + + # Compute unweighted results (default behaviour is average="macro") + result_unweighted = (out_0 + out_1 + out_2) / 3.0 + assert_almost_equal( + roc_auc_score(y_true, y_scores, multi_class="ovr", labels=labels), + result_unweighted, + ) + + # Tests the weighted, one-vs-rest multiclass ROC AUC algorithm + # on the same input (Provost & Domingos, 2000) + result_weighted = out_0 * 0.25 + out_1 * 0.25 + out_2 * 0.5 + assert_almost_equal( + roc_auc_score( + y_true, y_scores, multi_class="ovr", labels=labels, average="weighted" + ), + result_weighted, + ) + + +@pytest.mark.parametrize( + "multi_class, average", + [ + ("ovr", "macro"), + ("ovr", "micro"), + ("ovo", "macro"), + ], +) +def test_perfect_imperfect_chance_multiclass_roc_auc(multi_class, average): + y_true = np.array([3, 1, 2, 0]) + + # Perfect classifier (from a ranking point of view) has roc_auc_score = 1.0 + y_perfect = [ + [0.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.75, 0.05, 0.05, 0.15], + ] + assert_almost_equal( + roc_auc_score(y_true, y_perfect, multi_class=multi_class, average=average), + 1.0, + ) + + # Imperfect classifier has roc_auc_score < 1.0 + y_imperfect = [ + [0.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + assert ( + roc_auc_score(y_true, y_imperfect, multi_class=multi_class, average=average) + < 1.0 + ) + + # Chance level classifier has roc_auc_score = 0.5 + y_chance = 0.25 * np.ones((4, 4)) + assert roc_auc_score( + y_true, y_chance, multi_class=multi_class, average=average + ) == pytest.approx(0.5) + + +def test_micro_averaged_ovr_roc_auc(global_random_seed): + seed = global_random_seed + # Let's generate a set of random predictions and matching true labels such + # that the predictions are not perfect. To make the problem more interesting, + # we use an imbalanced class distribution (by using different parameters + # in the Dirichlet prior (conjugate prior of the multinomial distribution)).
+ y_pred = stats.dirichlet.rvs([2.0, 1.0, 0.5], size=1000, random_state=seed) + y_true = np.asarray( + [ + stats.multinomial.rvs(n=1, p=y_pred_i, random_state=seed).argmax() + for y_pred_i in y_pred + ] + ) + y_onehot = label_binarize(y_true, classes=[0, 1, 2]) + fpr, tpr, _ = roc_curve(y_onehot.ravel(), y_pred.ravel()) + roc_auc_by_hand = auc(fpr, tpr) + roc_auc_auto = roc_auc_score(y_true, y_pred, multi_class="ovr", average="micro") + assert roc_auc_by_hand == pytest.approx(roc_auc_auto) + + +@pytest.mark.parametrize( + "msg, y_true, labels", + [ + ("Parameter 'labels' must be unique", np.array([0, 1, 2, 2]), [0, 2, 0]), + ( + "Parameter 'labels' must be unique", + np.array(["a", "b", "c", "c"]), + ["a", "a", "b"], + ), + ( + ( + "Number of classes in y_true not equal to the number of columns " + "in 'y_score'" + ), + np.array([0, 2, 0, 2]), + None, + ), + ( + "Parameter 'labels' must be ordered", + np.array(["a", "b", "c", "c"]), + ["a", "c", "b"], + ), + ( + ( + "Number of given labels, 2, not equal to the number of columns in " + "'y_score', 3" + ), + np.array([0, 1, 2, 2]), + [0, 1], + ), + ( + ( + "Number of given labels, 2, not equal to the number of columns in " + "'y_score', 3" + ), + np.array(["a", "b", "c", "c"]), + ["a", "b"], + ), + ( + ( + "Number of given labels, 4, not equal to the number of columns in " + "'y_score', 3" + ), + np.array([0, 1, 2, 2]), + [0, 1, 2, 3], + ), + ( + ( + "Number of given labels, 4, not equal to the number of columns in " + "'y_score', 3" + ), + np.array(["a", "b", "c", "c"]), + ["a", "b", "c", "d"], + ), + ( + "'y_true' contains labels not in parameter 'labels'", + np.array(["a", "b", "c", "e"]), + ["a", "b", "c"], + ), + ( + "'y_true' contains labels not in parameter 'labels'", + np.array(["a", "b", "c", "d"]), + ["a", "b", "c"], + ), + ( + "'y_true' contains labels not in parameter 'labels'", + np.array([0, 1, 2, 3]), + [0, 1, 2], + ), + ], +) +@pytest.mark.parametrize("multi_class", ["ovo", "ovr"]) +def test_roc_auc_score_multiclass_labels_error(msg, y_true, labels, multi_class): + y_scores = np.array( + [[0.1, 0.8, 0.1], [0.3, 0.4, 0.3], [0.35, 0.5, 0.15], [0, 0.2, 0.8]] + ) + + with pytest.raises(ValueError, match=msg): + roc_auc_score(y_true, y_scores, labels=labels, multi_class=multi_class) + + +@pytest.mark.parametrize( + "msg, kwargs", + [ + ( + ( + r"average must be one of \('macro', 'weighted', None\) for " + r"multiclass problems" + ), + {"average": "samples", "multi_class": "ovo"}, + ), + ( + ( + r"average must be one of \('micro', 'macro', 'weighted', None\) for " + r"multiclass problems" + ), + {"average": "samples", "multi_class": "ovr"}, + ), + ( + ( + r"sample_weight is not supported for multiclass one-vs-one " + r"ROC AUC, 'sample_weight' must be None in this case" + ), + {"multi_class": "ovo", "sample_weight": []}, + ), + ( + ( + r"Partial AUC computation not available in multiclass setting, " + r"'max_fpr' must be set to `None`, received `max_fpr=0.5` " + r"instead" + ), + {"multi_class": "ovo", "max_fpr": 0.5}, + ), + (r"multi_class must be in \('ovo', 'ovr'\)", {}), + ], +) +def test_roc_auc_score_multiclass_error(msg, kwargs): + # Test that roc_auc_score function returns an error when trying + # to compute multiclass AUC for parameters where an output + # is not defined. 
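+
+ # --- Editorial note (not part of upstream scikit-learn): the multiclass
+ # path of roc_auc_score expects per-row probabilities (each row summing to
+ # 1.0), which is why the raw random scores are normalised with softmax
+ # below; a quick sanity check of that normalisation:
+ _raw_demo = np.array([[1.0, 2.0, 3.0]])
+ assert not np.allclose(_raw_demo.sum(axis=1), 1.0)
+ assert np.allclose(softmax(_raw_demo).sum(axis=1), 1.0)
+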
+ rng = check_random_state(404) + y_score = rng.rand(20, 3) + y_prob = softmax(y_score) + y_true = rng.randint(0, 3, size=20) + with pytest.raises(ValueError, match=msg): + roc_auc_score(y_true, y_prob, **kwargs) + + +def test_auc_score_non_binary_class(): + # Test that roc_auc_score function returns an error when trying + # to compute AUC for non-binary class values. + rng = check_random_state(404) + y_pred = rng.rand(10) + # y_true contains only one class value + y_true = np.zeros(10, dtype="int") + err_msg = "ROC AUC score is not defined" + with pytest.raises(ValueError, match=err_msg): + roc_auc_score(y_true, y_pred) + y_true = np.ones(10, dtype="int") + with pytest.raises(ValueError, match=err_msg): + roc_auc_score(y_true, y_pred) + y_true = np.full(10, -1, dtype="int") + with pytest.raises(ValueError, match=err_msg): + roc_auc_score(y_true, y_pred) + + with warnings.catch_warnings(record=True): + rng = check_random_state(404) + y_pred = rng.rand(10) + # y_true contains only one class value + y_true = np.zeros(10, dtype="int") + with pytest.raises(ValueError, match=err_msg): + roc_auc_score(y_true, y_pred) + y_true = np.ones(10, dtype="int") + with pytest.raises(ValueError, match=err_msg): + roc_auc_score(y_true, y_pred) + y_true = np.full(10, -1, dtype="int") + with pytest.raises(ValueError, match=err_msg): + roc_auc_score(y_true, y_pred) + + +@pytest.mark.parametrize("curve_func", CURVE_FUNCS) +def test_binary_clf_curve_multiclass_error(curve_func): + rng = check_random_state(404) + y_true = rng.randint(0, 3, size=10) + y_pred = rng.rand(10) + msg = "multiclass format is not supported" + with pytest.raises(ValueError, match=msg): + curve_func(y_true, y_pred) + + +@pytest.mark.parametrize("curve_func", CURVE_FUNCS) +def test_binary_clf_curve_implicit_pos_label(curve_func): + # Check that using string class labels raises an informative + # error for any supported string dtype: + msg = ( + "y_true takes value in {'a', 'b'} and pos_label is " + "not specified: either make y_true take " + "value in {0, 1} or {-1, 1} or pass pos_label " + "explicitly." 
+ ) + with pytest.raises(ValueError, match=msg): + curve_func(np.array(["a", "b"], dtype="= 0 and y_score.max() <= 1 else 0 + y_pred = (y_score > threshold).astype(np.int64) if k == 1 else y_true + + score = top_k_accuracy_score(y_true, y_score, k=k) + score_acc = accuracy_score(y_true, y_pred) + + assert score == score_acc == pytest.approx(true_score) + + +@pytest.mark.parametrize( + "y_true, true_score, labels", + [ + (np.array([0, 1, 1, 2]), 0.75, [0, 1, 2, 3]), + (np.array([0, 1, 1, 1]), 0.5, [0, 1, 2, 3]), + (np.array([1, 1, 1, 1]), 0.5, [0, 1, 2, 3]), + (np.array(["a", "e", "e", "a"]), 0.75, ["a", "b", "d", "e"]), + ], +) +@pytest.mark.parametrize("labels_as_ndarray", [True, False]) +def test_top_k_accuracy_score_multiclass_with_labels( + y_true, true_score, labels, labels_as_ndarray +): + """Test when labels and y_score are multiclass.""" + if labels_as_ndarray: + labels = np.asarray(labels) + y_score = np.array( + [ + [0.4, 0.3, 0.2, 0.1], + [0.1, 0.3, 0.4, 0.2], + [0.4, 0.1, 0.2, 0.3], + [0.3, 0.2, 0.4, 0.1], + ] + ) + + score = top_k_accuracy_score(y_true, y_score, k=2, labels=labels) + assert score == pytest.approx(true_score) + + +def test_top_k_accuracy_score_increasing(): + # Make sure increasing k leads to a higher score + X, y = datasets.make_classification( + n_classes=10, n_samples=1000, n_informative=10, random_state=0 + ) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + clf = LogisticRegression(random_state=0) + clf.fit(X_train, y_train) + + for X, y in zip((X_train, X_test), (y_train, y_test)): + scores = [ + top_k_accuracy_score(y, clf.predict_proba(X), k=k) for k in range(2, 10) + ] + + assert np.all(np.diff(scores) > 0) + + +@pytest.mark.parametrize( + "y_true, k, true_score", + [ + ([0, 1, 2, 3], 1, 0.25), + ([0, 1, 2, 3], 2, 0.5), + ([0, 1, 2, 3], 3, 1), + ], +) +def test_top_k_accuracy_score_ties(y_true, k, true_score): + # Make sure highest indices labels are chosen first in case of ties + y_score = np.array( + [ + [5, 5, 7, 0], + [1, 5, 5, 5], + [0, 0, 3, 3], + [1, 1, 1, 1], + ] + ) + assert top_k_accuracy_score(y_true, y_score, k=k) == pytest.approx(true_score) + + +@pytest.mark.parametrize( + "y_true, k", + [ + ([0, 1, 2, 3], 4), + ([0, 1, 2, 3], 5), + ], +) +def test_top_k_accuracy_score_warning(y_true, k): + y_score = np.array( + [ + [0.4, 0.3, 0.2, 0.1], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.3, 0.2, 0.1, 0.4], + ] + ) + expected_message = ( + r"'k' \(\d+\) greater than or equal to 'n_classes' \(\d+\) will result in a " + "perfect score and is therefore meaningless." 
+ ) + with pytest.warns(UndefinedMetricWarning, match=expected_message): + score = top_k_accuracy_score(y_true, y_score, k=k) + assert score == 1 + + +@pytest.mark.parametrize( + "y_true, y_score, labels, msg", + [ + ( + [0, 0.57, 1, 2], + [ + [0.2, 0.1, 0.7], + [0.4, 0.3, 0.3], + [0.3, 0.4, 0.3], + [0.4, 0.5, 0.1], + ], + None, + "y type must be 'binary' or 'multiclass', got 'continuous'", + ), + ( + [0, 1, 2, 3], + [ + [0.2, 0.1, 0.7], + [0.4, 0.3, 0.3], + [0.3, 0.4, 0.3], + [0.4, 0.5, 0.1], + ], + None, + r"Number of classes in 'y_true' \(4\) not equal to the number of " + r"classes in 'y_score' \(3\).", + ), + ( + ["c", "c", "a", "b"], + [ + [0.2, 0.1, 0.7], + [0.4, 0.3, 0.3], + [0.3, 0.4, 0.3], + [0.4, 0.5, 0.1], + ], + ["a", "b", "c", "c"], + "Parameter 'labels' must be unique.", + ), + ( + ["c", "c", "a", "b"], + [ + [0.2, 0.1, 0.7], + [0.4, 0.3, 0.3], + [0.3, 0.4, 0.3], + [0.4, 0.5, 0.1], + ], + ["a", "c", "b"], + "Parameter 'labels' must be ordered.", + ), + ( + [0, 0, 1, 2], + [ + [0.2, 0.1, 0.7], + [0.4, 0.3, 0.3], + [0.3, 0.4, 0.3], + [0.4, 0.5, 0.1], + ], + [0, 1, 2, 3], + r"Number of given labels \(4\) not equal to the number of classes in " + r"'y_score' \(3\).", + ), + ( + [0, 0, 1, 2], + [ + [0.2, 0.1, 0.7], + [0.4, 0.3, 0.3], + [0.3, 0.4, 0.3], + [0.4, 0.5, 0.1], + ], + [0, 1, 3], + "'y_true' contains labels not in parameter 'labels'.", + ), + ( + [0, 1], + [[0.5, 0.2, 0.2], [0.3, 0.4, 0.2]], + None, + ( + "`y_true` is binary while y_score is 2d with 3 classes. If" + " `y_true` does not contain all the labels, `labels` must be provided" + ), + ), + ], +) +def test_top_k_accuracy_score_error(y_true, y_score, labels, msg): + with pytest.raises(ValueError, match=msg): + top_k_accuracy_score(y_true, y_score, k=2, labels=labels) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_label_ranking_avg_precision_score_should_allow_csr_matrix_for_y_true_input( + csr_container, +): + # Test that label_ranking_avg_precision_score accept sparse y_true. + # Non-regression test for #22575 + y_true = csr_container([[1, 0, 0], [0, 0, 1]]) + y_score = np.array([[0.5, 0.9, 0.6], [0, 0, 1]]) + result = label_ranking_average_precision_score(y_true, y_score) + assert result == pytest.approx(2 / 3) + + +@pytest.mark.parametrize( + "metric", [average_precision_score, det_curve, precision_recall_curve, roc_curve] +) +@pytest.mark.parametrize( + "classes", [(False, True), (0, 1), (0.0, 1.0), ("zero", "one")] +) +def test_ranking_metric_pos_label_types(metric, classes): + """Check that the metric works with different types of `pos_label`. + + We can expect `pos_label` to be a bool, an integer, a float, a string. + No error should be raised for those types. + """ + rng = np.random.RandomState(42) + n_samples, pos_label = 10, classes[-1] + y_true = rng.choice(classes, size=n_samples, replace=True) + y_proba = rng.rand(n_samples) + result = metric(y_true, y_proba, pos_label=pos_label) + if isinstance(result, float): + assert not np.isnan(result) + else: + metric_1, metric_2, thresholds = result + assert not np.isnan(metric_1).any() + assert not np.isnan(metric_2).any() + assert not np.isnan(thresholds).any() + + +def test_roc_curve_with_probablity_estimates(global_random_seed): + """Check that thresholds do not exceed 1.0 when `y_score` is a probability + estimate. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/26193 + """ + rng = np.random.RandomState(global_random_seed) + y_true = rng.randint(0, 2, size=10) + y_score = rng.rand(10) + _, _, thresholds = roc_curve(y_true, y_score) + assert np.isinf(thresholds[0]) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_regression.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..29afac5cbc824a1aaf93a41b21d9e18ca6d1e093 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_regression.py @@ -0,0 +1,671 @@ +from itertools import product + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import optimize +from scipy.special import factorial, xlogy + +from sklearn.dummy import DummyRegressor +from sklearn.exceptions import UndefinedMetricWarning +from sklearn.metrics import ( + d2_absolute_error_score, + d2_pinball_score, + d2_tweedie_score, + explained_variance_score, + make_scorer, + max_error, + mean_absolute_error, + mean_absolute_percentage_error, + mean_pinball_loss, + mean_squared_error, + mean_squared_log_error, + mean_tweedie_deviance, + median_absolute_error, + r2_score, + root_mean_squared_error, + root_mean_squared_log_error, +) +from sklearn.metrics._regression import _check_reg_targets +from sklearn.model_selection import GridSearchCV +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + + +def test_regression_metrics(n_samples=50): + y_true = np.arange(n_samples) + y_pred = y_true + 1 + y_pred_2 = y_true - 1 + + assert_almost_equal(mean_squared_error(y_true, y_pred), 1.0) + assert_almost_equal( + mean_squared_log_error(y_true, y_pred), + mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred)), + ) + assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.0) + assert_almost_equal(mean_pinball_loss(y_true, y_pred), 0.5) + assert_almost_equal(mean_pinball_loss(y_true, y_pred_2), 0.5) + assert_almost_equal(mean_pinball_loss(y_true, y_pred, alpha=0.4), 0.6) + assert_almost_equal(mean_pinball_loss(y_true, y_pred_2, alpha=0.4), 0.4) + assert_almost_equal(median_absolute_error(y_true, y_pred), 1.0) + mape = mean_absolute_percentage_error(y_true, y_pred) + assert np.isfinite(mape) + assert mape > 1e6 + assert_almost_equal(max_error(y_true, y_pred), 1.0) + assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2) + assert_almost_equal(r2_score(y_true, y_pred, force_finite=False), 0.995, 2) + assert_almost_equal(explained_variance_score(y_true, y_pred), 1.0) + assert_almost_equal( + explained_variance_score(y_true, y_pred, force_finite=False), 1.0 + ) + assert_almost_equal( + mean_tweedie_deviance(y_true, y_pred, power=0), + mean_squared_error(y_true, y_pred), + ) + assert_almost_equal( + d2_tweedie_score(y_true, y_pred, power=0), r2_score(y_true, y_pred) + ) + dev_median = np.abs(y_true - np.median(y_true)).sum() + assert_array_almost_equal( + d2_absolute_error_score(y_true, y_pred), + 1 - np.abs(y_true - y_pred).sum() / dev_median, + ) + alpha = 0.2 + pinball_loss = lambda y_true, y_pred, alpha: alpha * np.maximum( + y_true - y_pred, 0 + ) + (1 - alpha) * np.maximum(y_pred - y_true, 0) + y_quantile = np.percentile(y_true, q=alpha * 100) + assert_almost_equal( + d2_pinball_score(y_true, y_pred, alpha=alpha), + 1 + - pinball_loss(y_true, y_pred, alpha).sum() + / pinball_loss(y_true, y_quantile, alpha).sum(), + ) + 
assert_almost_equal( + d2_absolute_error_score(y_true, y_pred), + d2_pinball_score(y_true, y_pred, alpha=0.5), + ) + + # Tweedie deviance needs positive y_pred, except for p=0, + # p>=2 needs positive y_true + # results evaluated by sympy + y_true = np.arange(1, 1 + n_samples) + y_pred = 2 * y_true + n = n_samples + assert_almost_equal( + mean_tweedie_deviance(y_true, y_pred, power=-1), + 5 / 12 * n * (n**2 + 2 * n + 1), + ) + assert_almost_equal( + mean_tweedie_deviance(y_true, y_pred, power=1), (n + 1) * (1 - np.log(2)) + ) + assert_almost_equal( + mean_tweedie_deviance(y_true, y_pred, power=2), 2 * np.log(2) - 1 + ) + assert_almost_equal( + mean_tweedie_deviance(y_true, y_pred, power=3 / 2), + ((6 * np.sqrt(2) - 8) / n) * np.sqrt(y_true).sum(), + ) + assert_almost_equal( + mean_tweedie_deviance(y_true, y_pred, power=3), np.sum(1 / y_true) / (4 * n) + ) + + dev_mean = 2 * np.mean(xlogy(y_true, 2 * y_true / (n + 1))) + assert_almost_equal( + d2_tweedie_score(y_true, y_pred, power=1), + 1 - (n + 1) * (1 - np.log(2)) / dev_mean, + ) + + dev_mean = 2 * np.log((n + 1) / 2) - 2 / n * np.log(factorial(n)) + assert_almost_equal( + d2_tweedie_score(y_true, y_pred, power=2), 1 - (2 * np.log(2) - 1) / dev_mean + ) + + +def test_root_mean_squared_error_multioutput_raw_value(): + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/pull/16323 + mse = mean_squared_error([[1]], [[10]], multioutput="raw_values") + rmse = root_mean_squared_error([[1]], [[10]], multioutput="raw_values") + assert np.sqrt(mse) == pytest.approx(rmse) + + +def test_multioutput_regression(): + y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) + y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]) + + error = mean_squared_error(y_true, y_pred) + assert_almost_equal(error, (1.0 / 3 + 2.0 / 3 + 2.0 / 3) / 4.0) + + error = root_mean_squared_error(y_true, y_pred) + assert_almost_equal(error, 0.454, decimal=2) + + error = mean_squared_log_error(y_true, y_pred) + assert_almost_equal(error, 0.200, decimal=2) + + error = root_mean_squared_log_error(y_true, y_pred) + assert_almost_equal(error, 0.315, decimal=2) + + # mean_absolute_error and mean_squared_error are equal because + # it is a binary problem. 
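+
+ # --- Editorial check (not part of upstream scikit-learn): with 0/1 entries
+ # every per-element error is 0 or 1, so |e| equals e**2 and the two metrics
+ # coincide exactly, as the comment above states.
+ assert mean_absolute_error(y_true, y_pred) == pytest.approx(
+ mean_squared_error(y_true, y_pred)
+ )
+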
+ error = mean_absolute_error(y_true, y_pred) + assert_almost_equal(error, (1.0 + 2.0 / 3) / 4.0) + + error = mean_pinball_loss(y_true, y_pred) + assert_almost_equal(error, (1.0 + 2.0 / 3) / 8.0) + + error = np.around(mean_absolute_percentage_error(y_true, y_pred), decimals=2) + assert np.isfinite(error) + assert error > 1e6 + error = median_absolute_error(y_true, y_pred) + assert_almost_equal(error, (1.0 + 1.0) / 4.0) + + error = r2_score(y_true, y_pred, multioutput="variance_weighted") + assert_almost_equal(error, 1.0 - 5.0 / 2) + error = r2_score(y_true, y_pred, multioutput="uniform_average") + assert_almost_equal(error, -0.875) + + score = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values") + raw_expected_score = [ + 1 + - np.abs(y_true[:, i] - y_pred[:, i]).sum() + / np.abs(y_true[:, i] - np.median(y_true[:, i])).sum() + for i in range(y_true.shape[1]) + ] + # in the last case, the denominator vanishes and hence we get nan, + # but since the numerator vanishes as well the expected score is 1.0 + raw_expected_score = np.where(np.isnan(raw_expected_score), 1, raw_expected_score) + assert_array_almost_equal(score, raw_expected_score) + + score = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="uniform_average") + assert_almost_equal(score, raw_expected_score.mean()) + # constant `y_true` with force_finite=True leads to 1. or 0. + yc = [5.0, 5.0] + error = r2_score(yc, [5.0, 5.0], multioutput="variance_weighted") + assert_almost_equal(error, 1.0) + error = r2_score(yc, [5.0, 5.1], multioutput="variance_weighted") + assert_almost_equal(error, 0.0) + + # Setting force_finite=False results in the nan for 4th output propagating + error = r2_score( + y_true, y_pred, multioutput="variance_weighted", force_finite=False + ) + assert_almost_equal(error, np.nan) + error = r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False) + assert_almost_equal(error, np.nan) + + # Dropping the 4th output to check `force_finite=False` for nominal + y_true = y_true[:, :-1] + y_pred = y_pred[:, :-1] + error = r2_score(y_true, y_pred, multioutput="variance_weighted") + error2 = r2_score( + y_true, y_pred, multioutput="variance_weighted", force_finite=False + ) + assert_almost_equal(error, error2) + error = r2_score(y_true, y_pred, multioutput="uniform_average") + error2 = r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False) + assert_almost_equal(error, error2) + + # constant `y_true` with force_finite=False leads to NaN or -Inf. 
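+
+ # --- Editorial note (not part of upstream scikit-learn): R^2 = 1 - SS_res /
+ # SS_tot is undefined for a constant target because SS_tot = 0, so
+ # force_finite=False yields nan for a perfect fit and -inf otherwise, as
+ # exercised below.
+ assert np.sum((np.asarray(yc) - np.mean(yc)) ** 2) == 0.0
+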
+ error = r2_score( + yc, [5.0, 5.0], multioutput="variance_weighted", force_finite=False + ) + assert_almost_equal(error, np.nan) + error = r2_score( + yc, [5.0, 6.0], multioutput="variance_weighted", force_finite=False + ) + assert_almost_equal(error, -np.inf) + + +def test_regression_metrics_at_limits(): + # Single-sample case + # Note: for r2 and d2_tweedie see also test_regression_single_sample + assert_almost_equal(mean_squared_error([0.0], [0.0]), 0.0) + assert_almost_equal(root_mean_squared_error([0.0], [0.0]), 0.0) + assert_almost_equal(mean_squared_log_error([0.0], [0.0]), 0.0) + assert_almost_equal(mean_absolute_error([0.0], [0.0]), 0.0) + assert_almost_equal(mean_pinball_loss([0.0], [0.0]), 0.0) + assert_almost_equal(mean_absolute_percentage_error([0.0], [0.0]), 0.0) + assert_almost_equal(median_absolute_error([0.0], [0.0]), 0.0) + assert_almost_equal(max_error([0.0], [0.0]), 0.0) + assert_almost_equal(explained_variance_score([0.0], [0.0]), 1.0) + + # Perfect cases + assert_almost_equal(r2_score([0.0, 1], [0.0, 1]), 1.0) + assert_almost_equal(d2_pinball_score([0.0, 1], [0.0, 1]), 1.0) + + # Non-finite cases + # R² and explained variance have a fix by default for non-finite cases + for s in (r2_score, explained_variance_score): + assert_almost_equal(s([0, 0], [1, -1]), 0.0) + assert_almost_equal(s([0, 0], [1, -1], force_finite=False), -np.inf) + assert_almost_equal(s([1, 1], [1, 1]), 1.0) + assert_almost_equal(s([1, 1], [1, 1], force_finite=False), np.nan) + msg = ( + "Mean Squared Logarithmic Error cannot be used when targets " + "contain negative values." + ) + with pytest.raises(ValueError, match=msg): + mean_squared_log_error([-1.0], [-1.0]) + msg = ( + "Mean Squared Logarithmic Error cannot be used when targets " + "contain negative values." + ) + with pytest.raises(ValueError, match=msg): + mean_squared_log_error([1.0, 2.0, 3.0], [1.0, -2.0, 3.0]) + msg = ( + "Mean Squared Logarithmic Error cannot be used when targets " + "contain negative values." + ) + with pytest.raises(ValueError, match=msg): + mean_squared_log_error([1.0, -2.0, 3.0], [1.0, 2.0, 3.0]) + msg = ( + "Root Mean Squared Logarithmic Error cannot be used when targets " + "contain negative values." + ) + with pytest.raises(ValueError, match=msg): + root_mean_squared_log_error([1.0, -2.0, 3.0], [1.0, 2.0, 3.0]) + + # Tweedie deviance error + power = -1.2 + assert_allclose( + mean_tweedie_deviance([0], [1.0], power=power), 2 / (2 - power), rtol=1e-3 + ) + msg = "can only be used on strictly positive y_pred." + with pytest.raises(ValueError, match=msg): + mean_tweedie_deviance([0.0], [0.0], power=power) + with pytest.raises(ValueError, match=msg): + d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power) + + assert_almost_equal(mean_tweedie_deviance([0.0], [0.0], power=0), 0.0, 2) + + power = 1.0 + msg = "only be used on non-negative y and strictly positive y_pred." + with pytest.raises(ValueError, match=msg): + mean_tweedie_deviance([0.0], [0.0], power=power) + with pytest.raises(ValueError, match=msg): + d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power) + + power = 1.5 + assert_allclose(mean_tweedie_deviance([0.0], [1.0], power=power), 2 / (2 - power)) + msg = "only be used on non-negative y and strictly positive y_pred." 
+ with pytest.raises(ValueError, match=msg): + mean_tweedie_deviance([0.0], [0.0], power=power) + with pytest.raises(ValueError, match=msg): + d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power) + + power = 2.0 + assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8) + msg = "can only be used on strictly positive y and y_pred." + with pytest.raises(ValueError, match=msg): + mean_tweedie_deviance([0.0], [0.0], power=power) + with pytest.raises(ValueError, match=msg): + d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power) + + power = 3.0 + assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8) + msg = "can only be used on strictly positive y and y_pred." + with pytest.raises(ValueError, match=msg): + mean_tweedie_deviance([0.0], [0.0], power=power) + with pytest.raises(ValueError, match=msg): + d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power) + + +def test__check_reg_targets(): + # All of length 3 + EXAMPLES = [ + ("continuous", [1, 2, 3], 1), + ("continuous", [[1], [2], [3]], 1), + ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2), + ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2), + ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3), + ] + + for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES, repeat=2): + if type1 == type2 and n_out1 == n_out2: + y_type, y_check1, y_check2, multioutput = _check_reg_targets(y1, y2, None) + assert type1 == y_type + if type1 == "continuous": + assert_array_equal(y_check1, np.reshape(y1, (-1, 1))) + assert_array_equal(y_check2, np.reshape(y2, (-1, 1))) + else: + assert_array_equal(y_check1, y1) + assert_array_equal(y_check2, y2) + else: + with pytest.raises(ValueError): + _check_reg_targets(y1, y2, None) + + +def test__check_reg_targets_exception(): + invalid_multioutput = "this_value_is_not_valid" + expected_message = ( + "Allowed 'multioutput' string values are.+You provided multioutput={!r}".format( + invalid_multioutput + ) + ) + with pytest.raises(ValueError, match=expected_message): + _check_reg_targets([1, 2, 3], [[1], [2], [3]], invalid_multioutput) + + +def test_regression_multioutput_array(): + y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]] + y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]] + + mse = mean_squared_error(y_true, y_pred, multioutput="raw_values") + mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values") + + pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values") + mape = mean_absolute_percentage_error(y_true, y_pred, multioutput="raw_values") + r = r2_score(y_true, y_pred, multioutput="raw_values") + evs = explained_variance_score(y_true, y_pred, multioutput="raw_values") + d2ps = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values") + evs2 = explained_variance_score( + y_true, y_pred, multioutput="raw_values", force_finite=False + ) + + assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2) + assert_array_almost_equal(mae, [0.25, 0.625], decimal=2) + assert_array_almost_equal(pbl, [0.25 / 2, 0.625 / 2], decimal=2) + assert_array_almost_equal(mape, [0.0778, 0.2262], decimal=2) + assert_array_almost_equal(r, [0.95, 0.93], decimal=2) + assert_array_almost_equal(evs, [0.95, 0.93], decimal=2) + assert_array_almost_equal(d2ps, [0.833, 0.722], decimal=2) + assert_array_almost_equal(evs2, [0.95, 0.93], decimal=2) + + # mean_absolute_error and mean_squared_error are equal because + # it is a binary problem. 
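+
+ # --- Editorial check (not part of upstream scikit-learn): the equality
+ # noted above holds whenever every error has magnitude exactly 1
+ # (|e| == e**2), which is the case for the all-zeros vs all-ones pair below.
+ assert mean_squared_error([0.0, 2.0], [1.0, 1.0]) == mean_absolute_error(
+ [0.0, 2.0], [1.0, 1.0]
+ )
+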
+ y_true = [[0, 0]] * 4 + y_pred = [[1, 1]] * 4 + mse = mean_squared_error(y_true, y_pred, multioutput="raw_values") + mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values") + pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values") + r = r2_score(y_true, y_pred, multioutput="raw_values") + d2ps = d2_pinball_score(y_true, y_pred, multioutput="raw_values") + assert_array_almost_equal(mse, [1.0, 1.0], decimal=2) + assert_array_almost_equal(mae, [1.0, 1.0], decimal=2) + assert_array_almost_equal(pbl, [0.5, 0.5], decimal=2) + assert_array_almost_equal(r, [0.0, 0.0], decimal=2) + assert_array_almost_equal(d2ps, [0.0, 0.0], decimal=2) + + r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values") + assert_array_almost_equal(r, [0, -3.5], decimal=2) + assert np.mean(r) == r2_score( + [[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="uniform_average" + ) + evs = explained_variance_score( + [[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values" + ) + assert_array_almost_equal(evs, [0, -1.25], decimal=2) + evs2 = explained_variance_score( + [[0, -1], [0, 1]], + [[2, 2], [1, 1]], + multioutput="raw_values", + force_finite=False, + ) + assert_array_almost_equal(evs2, [-np.inf, -1.25], decimal=2) + + # Checking for the condition in which both numerator and denominator is + # zero. + y_true = [[1, 3], [1, 2]] + y_pred = [[1, 4], [1, 1]] + r2 = r2_score(y_true, y_pred, multioutput="raw_values") + assert_array_almost_equal(r2, [1.0, -3.0], decimal=2) + assert np.mean(r2) == r2_score(y_true, y_pred, multioutput="uniform_average") + r22 = r2_score(y_true, y_pred, multioutput="raw_values", force_finite=False) + assert_array_almost_equal(r22, [np.nan, -3.0], decimal=2) + assert_almost_equal( + np.mean(r22), + r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False), + ) + + evs = explained_variance_score(y_true, y_pred, multioutput="raw_values") + assert_array_almost_equal(evs, [1.0, -3.0], decimal=2) + assert np.mean(evs) == explained_variance_score(y_true, y_pred) + d2ps = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values") + assert_array_almost_equal(d2ps, [1.0, -1.0], decimal=2) + evs2 = explained_variance_score( + y_true, y_pred, multioutput="raw_values", force_finite=False + ) + assert_array_almost_equal(evs2, [np.nan, -3.0], decimal=2) + assert_almost_equal( + np.mean(evs2), explained_variance_score(y_true, y_pred, force_finite=False) + ) + + # Handling msle separately as it does not accept negative inputs. 
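+
+ # --- Editorial check (not part of upstream scikit-learn): msle rejects
+ # negative values outright, which is why the fixtures below are strictly
+ # non-negative.
+ with pytest.raises(ValueError):
+ mean_squared_log_error([1.0, -2.0], [1.0, 2.0])
+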
+ y_true = np.array([[0.5, 1], [1, 2], [7, 6]]) + y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]]) + msle = mean_squared_log_error(y_true, y_pred, multioutput="raw_values") + msle2 = mean_squared_error( + np.log(1 + y_true), np.log(1 + y_pred), multioutput="raw_values" + ) + assert_array_almost_equal(msle, msle2, decimal=2) + + +def test_regression_custom_weights(): + y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]] + y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]] + + msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6]) + rmsew = root_mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6]) + maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6]) + mapew = mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.4, 0.6]) + rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6]) + evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6]) + d2psw = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput=[0.4, 0.6]) + evsw2 = explained_variance_score( + y_true, y_pred, multioutput=[0.4, 0.6], force_finite=False + ) + + assert_almost_equal(msew, 0.39, decimal=2) + assert_almost_equal(rmsew, 0.59, decimal=2) + assert_almost_equal(maew, 0.475, decimal=3) + assert_almost_equal(mapew, 0.1668, decimal=2) + assert_almost_equal(rw, 0.94, decimal=2) + assert_almost_equal(evsw, 0.94, decimal=2) + assert_almost_equal(d2psw, 0.766, decimal=2) + assert_almost_equal(evsw2, 0.94, decimal=2) + + # Handling msle separately as it does not accept negative inputs. + y_true = np.array([[0.5, 1], [1, 2], [7, 6]]) + y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]]) + msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7]) + msle2 = mean_squared_error( + np.log(1 + y_true), np.log(1 + y_pred), multioutput=[0.3, 0.7] + ) + assert_almost_equal(msle, msle2, decimal=2) + + +@pytest.mark.parametrize("metric", [r2_score, d2_tweedie_score, d2_pinball_score]) +def test_regression_single_sample(metric): + y_true = [0] + y_pred = [1] + warning_msg = "not well-defined with less than two samples." + + # Trigger the warning + with pytest.warns(UndefinedMetricWarning, match=warning_msg): + score = metric(y_true, y_pred) + assert np.isnan(score) + + +def test_tweedie_deviance_continuity(): + n_samples = 100 + + y_true = np.random.RandomState(0).rand(n_samples) + 0.1 + y_pred = np.random.RandomState(1).rand(n_samples) + 0.1 + + assert_allclose( + mean_tweedie_deviance(y_true, y_pred, power=0 - 1e-10), + mean_tweedie_deviance(y_true, y_pred, power=0), + ) + + # Ws we get closer to the limit, with 1e-12 difference the absolute + # tolerance to pass the below check increases. There are likely + # numerical precision issues on the edges of different definition + # regions. 
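+
+ # --- Editorial check (not part of upstream scikit-learn): at power=0 the
+ # Tweedie deviance is exactly the squared error, which anchors the
+ # continuity checks around the special points 0, 1 and 2.
+ assert_allclose(
+ mean_tweedie_deviance(y_true, y_pred, power=0),
+ mean_squared_error(y_true, y_pred),
+ )
+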
+ assert_allclose( + mean_tweedie_deviance(y_true, y_pred, power=1 + 1e-10), + mean_tweedie_deviance(y_true, y_pred, power=1), + atol=1e-6, + ) + + assert_allclose( + mean_tweedie_deviance(y_true, y_pred, power=2 - 1e-10), + mean_tweedie_deviance(y_true, y_pred, power=2), + atol=1e-6, + ) + + assert_allclose( + mean_tweedie_deviance(y_true, y_pred, power=2 + 1e-10), + mean_tweedie_deviance(y_true, y_pred, power=2), + atol=1e-6, + ) + + +def test_mean_absolute_percentage_error(): + random_number_generator = np.random.RandomState(42) + y_true = random_number_generator.exponential(size=100) + y_pred = 1.2 * y_true + assert mean_absolute_percentage_error(y_true, y_pred) == pytest.approx(0.2) + + +@pytest.mark.parametrize( + "distribution", ["normal", "lognormal", "exponential", "uniform"] +) +@pytest.mark.parametrize("target_quantile", [0.05, 0.5, 0.75]) +def test_mean_pinball_loss_on_constant_predictions(distribution, target_quantile): + if not hasattr(np, "quantile"): + pytest.skip( + "This test requires a more recent version of numpy " + "with support for np.quantile." + ) + + # Check that the pinball loss is minimized by the empirical quantile. + n_samples = 3000 + rng = np.random.RandomState(42) + data = getattr(rng, distribution)(size=n_samples) + + # Compute the best possible pinball loss for any constant predictor: + best_pred = np.quantile(data, target_quantile) + best_constant_pred = np.full(n_samples, fill_value=best_pred) + best_pbl = mean_pinball_loss(data, best_constant_pred, alpha=target_quantile) + + # Evaluate the loss on a grid of quantiles + candidate_predictions = np.quantile(data, np.linspace(0, 1, 100)) + for pred in candidate_predictions: + # Compute the pinball loss of a constant predictor: + constant_pred = np.full(n_samples, fill_value=pred) + pbl = mean_pinball_loss(data, constant_pred, alpha=target_quantile) + + # Check that the loss of this constant predictor is greater or equal + # than the loss of using the optimal quantile (up to machine + # precision): + assert pbl >= best_pbl - np.finfo(best_pbl.dtype).eps + + # Check that the value of the pinball loss matches the analytical + # formula. + expected_pbl = (pred - data[data < pred]).sum() * (1 - target_quantile) + ( + data[data >= pred] - pred + ).sum() * target_quantile + expected_pbl /= n_samples + assert_almost_equal(expected_pbl, pbl) + + # Check that we can actually recover the target_quantile by minimizing the + # pinball loss w.r.t. the constant prediction quantile. + def objective_func(x): + constant_pred = np.full(n_samples, fill_value=x) + return mean_pinball_loss(data, constant_pred, alpha=target_quantile) + + result = optimize.minimize(objective_func, data.mean(), method="Nelder-Mead") + assert result.success + # The minimum is not unique with limited data, hence the large tolerance. + assert result.x == pytest.approx(best_pred, rel=1e-2) + assert result.fun == pytest.approx(best_pbl) + + +def test_dummy_quantile_parameter_tuning(): + # Integration test to check that it is possible to use the pinball loss to + # tune the hyperparameter of a quantile regressor. This is conceptually + # similar to the previous test but using the scikit-learn estimator and + # scoring API instead. 
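+
+ # --- Editorial illustration (not part of upstream scikit-learn): the
+ # asymmetry of the pinball loss on a single point; with alpha=0.1 an
+ # over-prediction of 2.0 costs (1 - 0.1) * 2.0 = 1.8, which is what makes
+ # the loss suitable for tuning the quantile hyperparameter below.
+ assert mean_pinball_loss([0.0], [2.0], alpha=0.1) == pytest.approx(1.8)
+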
+ n_samples = 1000 + rng = np.random.RandomState(0) + X = rng.normal(size=(n_samples, 5)) # Ignored + y = rng.exponential(size=n_samples) + + all_quantiles = [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95] + for alpha in all_quantiles: + neg_mean_pinball_loss = make_scorer( + mean_pinball_loss, + alpha=alpha, + greater_is_better=False, + ) + regressor = DummyRegressor(strategy="quantile", quantile=0.25) + grid_search = GridSearchCV( + regressor, + param_grid=dict(quantile=all_quantiles), + scoring=neg_mean_pinball_loss, + ).fit(X, y) + + assert grid_search.best_params_["quantile"] == pytest.approx(alpha) + + +def test_pinball_loss_relation_with_mae(): + # Test that mean_pinball loss with alpha=0.5 if half of mean absolute error + rng = np.random.RandomState(714) + n = 100 + y_true = rng.normal(size=n) + y_pred = y_true.copy() + rng.uniform(n) + assert ( + mean_absolute_error(y_true, y_pred) + == mean_pinball_loss(y_true, y_pred, alpha=0.5) * 2 + ) + + +# TODO(1.6): remove this test +@pytest.mark.parametrize("metric", [mean_squared_error, mean_squared_log_error]) +def test_mean_squared_deprecation_squared(metric): + """Check the deprecation warning of the squared parameter""" + depr_msg = "'squared' is deprecated in version 1.4 and will be removed in 1.6." + y_true, y_pred = np.arange(10), np.arange(1, 11) + with pytest.warns(FutureWarning, match=depr_msg): + metric(y_true, y_pred, squared=False) + + +# TODO(1.6): remove this test +@pytest.mark.filterwarnings("ignore:'squared' is deprecated") +@pytest.mark.parametrize( + "old_func, new_func", + [ + (mean_squared_error, root_mean_squared_error), + (mean_squared_log_error, root_mean_squared_log_error), + ], +) +def test_rmse_rmsle_parameter(old_func, new_func): + # Check that the new rmse/rmsle function is equivalent to + # the old mse/msle + squared=False function. 
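+
+ # --- Editorial check (not part of upstream scikit-learn): the new helpers
+ # are simply the square root of the corresponding mean squared metric.
+ assert root_mean_squared_error([0.0, 0.0], [3.0, 4.0]) == pytest.approx(
+ np.sqrt(mean_squared_error([0.0, 0.0], [3.0, 4.0]))
+ )
+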
+ y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) + y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]) + y_true = np.array([[0.5, 1], [1, 2], [7, 6]]) + y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]]) + sw = np.arange(len(y_true)) + + expected = old_func(y_true, y_pred, squared=False) + actual = new_func(y_true, y_pred) + assert_allclose(expected, actual) + + expected = old_func(y_true, y_pred, sample_weight=sw, squared=False) + actual = new_func(y_true, y_pred, sample_weight=sw) + assert_allclose(expected, actual) + + expected = old_func(y_true, y_pred, multioutput="raw_values", squared=False) + actual = new_func(y_true, y_pred, multioutput="raw_values") + assert_allclose(expected, actual) + + expected = old_func( + y_true, y_pred, sample_weight=sw, multioutput="raw_values", squared=False + ) + actual = new_func(y_true, y_pred, sample_weight=sw, multioutput="raw_values") + assert_allclose(expected, actual) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_score_objects.py b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_score_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..5e3b0dd71d33fa26cf439b1c3d2a57eeb72e33d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/tests/test_score_objects.py @@ -0,0 +1,1507 @@ +import numbers +import os +import pickle +import shutil +import tempfile +from copy import deepcopy +from functools import partial +from unittest.mock import Mock + +import joblib +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn import config_context +from sklearn.base import BaseEstimator +from sklearn.cluster import KMeans +from sklearn.datasets import ( + load_diabetes, + make_blobs, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.linear_model import LogisticRegression, Perceptron, Ridge +from sklearn.metrics import ( + accuracy_score, + average_precision_score, + balanced_accuracy_score, + brier_score_loss, + check_scoring, + f1_score, + fbeta_score, + get_scorer, + get_scorer_names, + jaccard_score, + log_loss, + make_scorer, + matthews_corrcoef, + precision_score, + r2_score, + recall_score, + roc_auc_score, + top_k_accuracy_score, +) +from sklearn.metrics import cluster as cluster_module +from sklearn.metrics._scorer import ( + _check_multimetric_scoring, + _MultimetricScorer, + _PassthroughScorer, + _Scorer, +) +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.multiclass import OneVsRestClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.pipeline import make_pipeline +from sklearn.svm import LinearSVC +from sklearn.tests.metadata_routing_common import ( + assert_request_equal, + assert_request_is_empty, +) +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.metadata_routing import MetadataRouter + +REGRESSION_SCORERS = [ + "explained_variance", + "r2", + "neg_mean_absolute_error", + "neg_mean_squared_error", + "neg_mean_absolute_percentage_error", + "neg_mean_squared_log_error", + "neg_median_absolute_error", + "neg_root_mean_squared_error", + "neg_root_mean_squared_log_error", + "mean_absolute_error", + "mean_absolute_percentage_error", + "mean_squared_error", + "median_absolute_error", + "max_error", + "neg_mean_poisson_deviance", + 
"neg_mean_gamma_deviance", +] + +CLF_SCORERS = [ + "accuracy", + "balanced_accuracy", + "top_k_accuracy", + "f1", + "f1_weighted", + "f1_macro", + "f1_micro", + "roc_auc", + "average_precision", + "precision", + "precision_weighted", + "precision_macro", + "precision_micro", + "recall", + "recall_weighted", + "recall_macro", + "recall_micro", + "neg_log_loss", + "neg_brier_score", + "jaccard", + "jaccard_weighted", + "jaccard_macro", + "jaccard_micro", + "roc_auc_ovr", + "roc_auc_ovo", + "roc_auc_ovr_weighted", + "roc_auc_ovo_weighted", + "matthews_corrcoef", + "positive_likelihood_ratio", + "neg_negative_likelihood_ratio", +] + +# All supervised cluster scorers (They behave like classification metric) +CLUSTER_SCORERS = [ + "adjusted_rand_score", + "rand_score", + "homogeneity_score", + "completeness_score", + "v_measure_score", + "mutual_info_score", + "adjusted_mutual_info_score", + "normalized_mutual_info_score", + "fowlkes_mallows_score", +] + +MULTILABEL_ONLY_SCORERS = [ + "precision_samples", + "recall_samples", + "f1_samples", + "jaccard_samples", +] + +REQUIRE_POSITIVE_Y_SCORERS = ["neg_mean_poisson_deviance", "neg_mean_gamma_deviance"] + + +def _require_positive_y(y): + """Make targets strictly positive""" + offset = abs(y.min()) + 1 + y = y + offset + return y + + +def _make_estimators(X_train, y_train, y_ml_train): + # Make estimators that make sense to test various scoring methods + sensible_regr = DecisionTreeRegressor(random_state=0) + # some of the regressions scorers require strictly positive input. + sensible_regr.fit(X_train, _require_positive_y(y_train)) + sensible_clf = DecisionTreeClassifier(random_state=0) + sensible_clf.fit(X_train, y_train) + sensible_ml_clf = DecisionTreeClassifier(random_state=0) + sensible_ml_clf.fit(X_train, y_ml_train) + return dict( + [(name, sensible_regr) for name in REGRESSION_SCORERS] + + [(name, sensible_clf) for name in CLF_SCORERS] + + [(name, sensible_clf) for name in CLUSTER_SCORERS] + + [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS] + ) + + +X_mm, y_mm, y_ml_mm = None, None, None +ESTIMATORS = None +TEMP_FOLDER = None + + +def setup_module(): + # Create some memory mapped data + global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS + TEMP_FOLDER = tempfile.mkdtemp(prefix="sklearn_test_score_objects_") + X, y = make_classification(n_samples=30, n_features=5, random_state=0) + _, y_ml = make_multilabel_classification(n_samples=X.shape[0], random_state=0) + filename = os.path.join(TEMP_FOLDER, "test_data.pkl") + joblib.dump((X, y, y_ml), filename) + X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode="r") + ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm) + + +def teardown_module(): + global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS + # GC closes the mmap file descriptors + X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None + shutil.rmtree(TEMP_FOLDER) + + +class EstimatorWithFit(BaseEstimator): + """Dummy estimator to test scoring validators""" + + def fit(self, X, y): + return self + + +class EstimatorWithFitAndScore: + """Dummy estimator to test scoring validators""" + + def fit(self, X, y): + return self + + def score(self, X, y): + return 1.0 + + +class EstimatorWithFitAndPredict: + """Dummy estimator to test scoring validators""" + + def fit(self, X, y): + self.y = y + return self + + def predict(self, X): + return self.y + + +class DummyScorer: + """Dummy scorer that always returns 1.""" + + def __call__(self, est, X, y): + return 1 + + +def test_all_scorers_repr(): + # Test that all scorers have a 
working repr + for name in get_scorer_names(): + repr(get_scorer(name)) + + +def check_scoring_validator_for_single_metric_usecases(scoring_validator): + # Test all branches of single metric usecases + estimator = EstimatorWithFitAndScore() + estimator.fit([[1]], [1]) + scorer = scoring_validator(estimator) + assert isinstance(scorer, _PassthroughScorer) + assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0) + + estimator = EstimatorWithFitAndPredict() + estimator.fit([[1]], [1]) + pattern = ( + r"If no scoring is specified, the estimator passed should have" + r" a 'score' method\. The estimator .* does not\." + ) + with pytest.raises(TypeError, match=pattern): + scoring_validator(estimator) + + scorer = scoring_validator(estimator, scoring="accuracy") + assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0) + + estimator = EstimatorWithFit() + scorer = scoring_validator(estimator, scoring="accuracy") + assert isinstance(scorer, _Scorer) + assert scorer._response_method == "predict" + + # Test the allow_none parameter for check_scoring alone + if scoring_validator is check_scoring: + estimator = EstimatorWithFit() + scorer = scoring_validator(estimator, allow_none=True) + assert scorer is None + + +@pytest.mark.parametrize( + "scoring", + ( + ("accuracy",), + ["precision"], + {"acc": "accuracy", "precision": "precision"}, + ("accuracy", "precision"), + ["precision", "accuracy"], + { + "accuracy": make_scorer(accuracy_score), + "precision": make_scorer(precision_score), + }, + ), + ids=[ + "single_tuple", + "single_list", + "dict_str", + "multi_tuple", + "multi_list", + "dict_callable", + ], +) +def test_check_scoring_and_check_multimetric_scoring(scoring): + check_scoring_validator_for_single_metric_usecases(check_scoring) + # To make sure the check_scoring is correctly applied to the constituent + # scorers + + estimator = LinearSVC(dual="auto", random_state=0) + estimator.fit([[1], [2], [3]], [1, 1, 0]) + + scorers = _check_multimetric_scoring(estimator, scoring) + assert isinstance(scorers, dict) + assert sorted(scorers.keys()) == sorted(list(scoring)) + assert all([isinstance(scorer, _Scorer) for scorer in list(scorers.values())]) + assert all(scorer._response_method == "predict" for scorer in scorers.values()) + + if "acc" in scoring: + assert_almost_equal( + scorers["acc"](estimator, [[1], [2], [3]], [1, 0, 0]), 2.0 / 3.0 + ) + if "accuracy" in scoring: + assert_almost_equal( + scorers["accuracy"](estimator, [[1], [2], [3]], [1, 0, 0]), 2.0 / 3.0 + ) + if "precision" in scoring: + assert_almost_equal( + scorers["precision"](estimator, [[1], [2], [3]], [1, 0, 0]), 0.5 + ) + + +@pytest.mark.parametrize( + "scoring, msg", + [ + ( + (make_scorer(precision_score), make_scorer(accuracy_score)), + "One or more of the elements were callables", + ), + ([5], "Non-string types were found"), + ((make_scorer(precision_score),), "One or more of the elements were callables"), + ((), "Empty list was given"), + (("f1", "f1"), "Duplicate elements were found"), + ({4: "accuracy"}, "Non-string types were found in the keys"), + ({}, "An empty dict was passed"), + ], + ids=[ + "tuple of callables", + "list of int", + "tuple of one callable", + "empty tuple", + "non-unique str", + "non-string key dict", + "empty dict", + ], +) +def test_check_scoring_and_check_multimetric_scoring_errors(scoring, msg): + # Make sure it raises errors when scoring parameter is not valid. 
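+ # (Illustrative note, mirroring the parametrization above: valid specs look like
+ # ("accuracy", "precision") or {"acc": "accuracy"}, whereas callables inside
+ # containers, duplicate names, non-string keys and empty containers must raise.)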
+ # More weird corner cases are tested at test_validation.py + estimator = EstimatorWithFitAndPredict() + estimator.fit([[1]], [1]) + + with pytest.raises(ValueError, match=msg): + _check_multimetric_scoring(estimator, scoring=scoring) + + +def test_check_scoring_gridsearchcv(): + # test that check_scoring works on GridSearchCV and pipeline. + # slightly redundant non-regression test. + + grid = GridSearchCV(LinearSVC(dual="auto"), param_grid={"C": [0.1, 1]}, cv=3) + scorer = check_scoring(grid, scoring="f1") + assert isinstance(scorer, _Scorer) + assert scorer._response_method == "predict" + + pipe = make_pipeline(LinearSVC(dual="auto")) + scorer = check_scoring(pipe, scoring="f1") + assert isinstance(scorer, _Scorer) + assert scorer._response_method == "predict" + + # check that cross_val_score definitely calls the scorer + # and doesn't make any assumptions about the estimator apart from having a + # fit. + scores = cross_val_score( + EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1], scoring=DummyScorer(), cv=3 + ) + assert_array_equal(scores, 1) + + +@pytest.mark.parametrize( + "scorer_name, metric", + [ + ("f1", f1_score), + ("f1_weighted", partial(f1_score, average="weighted")), + ("f1_macro", partial(f1_score, average="macro")), + ("f1_micro", partial(f1_score, average="micro")), + ("precision", precision_score), + ("precision_weighted", partial(precision_score, average="weighted")), + ("precision_macro", partial(precision_score, average="macro")), + ("precision_micro", partial(precision_score, average="micro")), + ("recall", recall_score), + ("recall_weighted", partial(recall_score, average="weighted")), + ("recall_macro", partial(recall_score, average="macro")), + ("recall_micro", partial(recall_score, average="micro")), + ("jaccard", jaccard_score), + ("jaccard_weighted", partial(jaccard_score, average="weighted")), + ("jaccard_macro", partial(jaccard_score, average="macro")), + ("jaccard_micro", partial(jaccard_score, average="micro")), + ("top_k_accuracy", top_k_accuracy_score), + ("matthews_corrcoef", matthews_corrcoef), + ], +) +def test_classification_binary_scores(scorer_name, metric): + # check consistency between score and scorer for scores supporting + # binary classification. 
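+ # For example (illustrative of the check performed below), with scorer_name="f1"
+ # the expectation is that get_scorer("f1")(clf, X_test, y_test) matches
+ # f1_score(y_test, clf.predict(X_test)) up to floating point rounding.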
+ X, y = make_blobs(random_state=0, centers=2) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + clf.fit(X_train, y_train) + + score = get_scorer(scorer_name)(clf, X_test, y_test) + expected_score = metric(y_test, clf.predict(X_test)) + assert_almost_equal(score, expected_score) + + +@pytest.mark.parametrize( + "scorer_name, metric", + [ + ("accuracy", accuracy_score), + ("balanced_accuracy", balanced_accuracy_score), + ("f1_weighted", partial(f1_score, average="weighted")), + ("f1_macro", partial(f1_score, average="macro")), + ("f1_micro", partial(f1_score, average="micro")), + ("precision_weighted", partial(precision_score, average="weighted")), + ("precision_macro", partial(precision_score, average="macro")), + ("precision_micro", partial(precision_score, average="micro")), + ("recall_weighted", partial(recall_score, average="weighted")), + ("recall_macro", partial(recall_score, average="macro")), + ("recall_micro", partial(recall_score, average="micro")), + ("jaccard_weighted", partial(jaccard_score, average="weighted")), + ("jaccard_macro", partial(jaccard_score, average="macro")), + ("jaccard_micro", partial(jaccard_score, average="micro")), + ], +) +def test_classification_multiclass_scores(scorer_name, metric): + # check consistency between score and scorer for scores supporting + # multiclass classification. + X, y = make_classification( + n_classes=3, n_informative=3, n_samples=30, random_state=0 + ) + + # use `stratify` = y to ensure train and test sets capture all classes + X_train, X_test, y_train, y_test = train_test_split( + X, y, random_state=0, stratify=y + ) + + clf = DecisionTreeClassifier(random_state=0) + clf.fit(X_train, y_train) + score = get_scorer(scorer_name)(clf, X_test, y_test) + expected_score = metric(y_test, clf.predict(X_test)) + assert score == pytest.approx(expected_score) + + +def test_custom_scorer_pickling(): + # test that custom scorer can be pickled + X, y = make_blobs(random_state=0, centers=2) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + clf.fit(X_train, y_train) + + scorer = make_scorer(fbeta_score, beta=2) + score1 = scorer(clf, X_test, y_test) + unpickled_scorer = pickle.loads(pickle.dumps(scorer)) + score2 = unpickled_scorer(clf, X_test, y_test) + assert score1 == pytest.approx(score2) + + # smoke test the repr: + repr(fbeta_score) + + +def test_regression_scorers(): + # Test regression scorers. + diabetes = load_diabetes() + X, y = diabetes.data, diabetes.target + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + clf = Ridge() + clf.fit(X_train, y_train) + score1 = get_scorer("r2")(clf, X_test, y_test) + score2 = r2_score(y_test, clf.predict(X_test)) + assert_almost_equal(score1, score2) + + +def test_thresholded_scorers(): + # Test scorers that take thresholds. 
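+ # "roc_auc" may consume either decision_function or predict_proba output; both
+ # response methods are compared below against roc_auc_score applied directly to
+ # the corresponding predictions.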
+ X, y = make_blobs(random_state=0, centers=2) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + clf = LogisticRegression(random_state=0) + clf.fit(X_train, y_train) + score1 = get_scorer("roc_auc")(clf, X_test, y_test) + score2 = roc_auc_score(y_test, clf.decision_function(X_test)) + score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) + assert_almost_equal(score1, score2) + assert_almost_equal(score1, score3) + + logscore = get_scorer("neg_log_loss")(clf, X_test, y_test) + logloss = log_loss(y_test, clf.predict_proba(X_test)) + assert_almost_equal(-logscore, logloss) + + # same for an estimator without decision_function + clf = DecisionTreeClassifier() + clf.fit(X_train, y_train) + score1 = get_scorer("roc_auc")(clf, X_test, y_test) + score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) + assert_almost_equal(score1, score2) + + # test with a regressor (no decision_function) + reg = DecisionTreeRegressor() + reg.fit(X_train, y_train) + err_msg = "DecisionTreeRegressor has none of the following attributes" + with pytest.raises(AttributeError, match=err_msg): + get_scorer("roc_auc")(reg, X_test, y_test) + + # Test that an exception is raised on more than two classes + X, y = make_blobs(random_state=0, centers=3) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + clf.fit(X_train, y_train) + with pytest.raises(ValueError, match="multi_class must be in \\('ovo', 'ovr'\\)"): + get_scorer("roc_auc")(clf, X_test, y_test) + + # test error is raised with a single class present in model + # (predict_proba shape is not suitable for binary auc) + X, y = make_blobs(random_state=0, centers=2) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + clf = DecisionTreeClassifier() + clf.fit(X_train, np.zeros_like(y_train)) + with pytest.raises(ValueError, match="need classifier with two classes"): + get_scorer("roc_auc")(clf, X_test, y_test) + + # for proba scorers + with pytest.raises(ValueError, match="need classifier with two classes"): + get_scorer("neg_log_loss")(clf, X_test, y_test) + + +def test_thresholded_scorers_multilabel_indicator_data(): + # Test that the scorer work with multilabel-indicator format + # for multilabel and multi-output multi-class classifier + X, y = make_multilabel_classification(allow_unlabeled=False, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + # Multi-output multi-class predict_proba + clf = DecisionTreeClassifier() + clf.fit(X_train, y_train) + y_proba = clf.predict_proba(X_test) + score1 = get_scorer("roc_auc")(clf, X_test, y_test) + score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T) + assert_almost_equal(score1, score2) + + # Multilabel predict_proba + clf = OneVsRestClassifier(DecisionTreeClassifier()) + clf.fit(X_train, y_train) + score1 = get_scorer("roc_auc")(clf, X_test, y_test) + score2 = roc_auc_score(y_test, clf.predict_proba(X_test)) + assert_almost_equal(score1, score2) + + # Multilabel decision function + clf = OneVsRestClassifier(LinearSVC(dual="auto", random_state=0)) + clf.fit(X_train, y_train) + score1 = get_scorer("roc_auc")(clf, X_test, y_test) + score2 = roc_auc_score(y_test, clf.decision_function(X_test)) + assert_almost_equal(score1, score2) + + +def test_supervised_cluster_scorers(): + # Test clustering scorers against gold standard labeling. 
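+ # Supervised clustering scorers behave like classification metrics: the scorer
+ # applied to the fitted KMeans is checked against
+ # metric(y_test, km.predict(X_test)) for every name in CLUSTER_SCORERS.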
+ X, y = make_blobs(random_state=0, centers=2) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + km = KMeans(n_clusters=3, n_init="auto") + km.fit(X_train) + for name in CLUSTER_SCORERS: + score1 = get_scorer(name)(km, X_test, y_test) + score2 = getattr(cluster_module, name)(y_test, km.predict(X_test)) + assert_almost_equal(score1, score2) + + +@ignore_warnings +def test_raises_on_score_list(): + # Test that when a list of scores is returned, we raise proper errors. + X, y = make_blobs(random_state=0) + f1_scorer_no_average = make_scorer(f1_score, average=None) + clf = DecisionTreeClassifier() + with pytest.raises(ValueError): + cross_val_score(clf, X, y, scoring=f1_scorer_no_average) + grid_search = GridSearchCV( + clf, scoring=f1_scorer_no_average, param_grid={"max_depth": [1, 2]} + ) + with pytest.raises(ValueError): + grid_search.fit(X, y) + + +@ignore_warnings +def test_classification_scorer_sample_weight(): + # Test that classification scorers support sample_weight or raise sensible + # errors + + # Unlike the metrics invariance test, in the scorer case it's harder + # to ensure that, on the classifier output, weighted and unweighted + # scores really should be unequal. + X, y = make_classification(random_state=0) + _, y_ml = make_multilabel_classification(n_samples=X.shape[0], random_state=0) + split = train_test_split(X, y, y_ml, random_state=0) + X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split + + sample_weight = np.ones_like(y_test) + sample_weight[:10] = 0 + + # get sensible estimators for each metric + estimator = _make_estimators(X_train, y_train, y_ml_train) + + for name in get_scorer_names(): + scorer = get_scorer(name) + if name in REGRESSION_SCORERS: + # skip the regression scores + continue + if name == "top_k_accuracy": + # in the binary case k > 1 will always lead to a perfect score + scorer._kwargs = {"k": 1} + if name in MULTILABEL_ONLY_SCORERS: + target = y_ml_test + else: + target = y_test + try: + weighted = scorer( + estimator[name], X_test, target, sample_weight=sample_weight + ) + ignored = scorer(estimator[name], X_test[10:], target[10:]) + unweighted = scorer(estimator[name], X_test, target) + # this should not raise. sample_weight should be ignored if None. 
+ _ = scorer(estimator[name], X_test[:10], target[:10], sample_weight=None) + assert weighted != unweighted, ( + f"scorer {name} behaves identically when called with " + f"sample weights: {weighted} vs {unweighted}" + ) + assert_almost_equal( + weighted, + ignored, + err_msg=( + f"scorer {name} behaves differently " + "when ignoring samples and setting " + f"sample_weight to 0: {weighted} vs {ignored}" + ), + ) + + except TypeError as e: + assert "sample_weight" in str(e), ( + f"scorer {name} raises unhelpful exception when called " + f"with sample weights: {str(e)}" + ) + + +@ignore_warnings +def test_regression_scorer_sample_weight(): + # Test that regression scorers support sample_weight or raise sensible + # errors + + # Odd number of test samples req for neg_median_absolute_error + X, y = make_regression(n_samples=101, n_features=20, random_state=0) + y = _require_positive_y(y) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + sample_weight = np.ones_like(y_test) + # Odd number req for neg_median_absolute_error + sample_weight[:11] = 0 + + reg = DecisionTreeRegressor(random_state=0) + reg.fit(X_train, y_train) + + for name in get_scorer_names(): + scorer = get_scorer(name) + if name not in REGRESSION_SCORERS: + # skip classification scorers + continue + try: + weighted = scorer(reg, X_test, y_test, sample_weight=sample_weight) + ignored = scorer(reg, X_test[11:], y_test[11:]) + unweighted = scorer(reg, X_test, y_test) + assert weighted != unweighted, ( + f"scorer {name} behaves identically when called with " + f"sample weights: {weighted} vs {unweighted}" + ) + assert_almost_equal( + weighted, + ignored, + err_msg=( + f"scorer {name} behaves differently " + "when ignoring samples and setting " + f"sample_weight to 0: {weighted} vs {ignored}" + ), + ) + + except TypeError as e: + assert "sample_weight" in str(e), ( + f"scorer {name} raises unhelpful exception when called " + f"with sample weights: {str(e)}" + ) + + +@pytest.mark.parametrize("name", get_scorer_names()) +def test_scorer_memmap_input(name): + # Non-regression test for #6147: some score functions would + # return singleton memmap when computed on memmap data instead of scalar + # float values. 
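+ # X_mm / y_mm are memory-mapped arrays created in setup_module via joblib; the
+ # scorer must still return a plain scalar, which is what the
+ # isinstance(score, numbers.Number) assertion below verifies.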
+ + if name in REQUIRE_POSITIVE_Y_SCORERS: + y_mm_1 = _require_positive_y(y_mm) + y_ml_mm_1 = _require_positive_y(y_ml_mm) + else: + y_mm_1, y_ml_mm_1 = y_mm, y_ml_mm + + # UndefinedMetricWarning for P / R scores + with ignore_warnings(): + scorer, estimator = get_scorer(name), ESTIMATORS[name] + if name in MULTILABEL_ONLY_SCORERS: + score = scorer(estimator, X_mm, y_ml_mm_1) + else: + score = scorer(estimator, X_mm, y_mm_1) + assert isinstance(score, numbers.Number), name + + +def test_scoring_is_not_metric(): + with pytest.raises(ValueError, match="make_scorer"): + check_scoring(LogisticRegression(), scoring=f1_score) + with pytest.raises(ValueError, match="make_scorer"): + check_scoring(LogisticRegression(), scoring=roc_auc_score) + with pytest.raises(ValueError, match="make_scorer"): + check_scoring(Ridge(), scoring=r2_score) + with pytest.raises(ValueError, match="make_scorer"): + check_scoring(KMeans(), scoring=cluster_module.adjusted_rand_score) + with pytest.raises(ValueError, match="make_scorer"): + check_scoring(KMeans(), scoring=cluster_module.rand_score) + + +@pytest.mark.parametrize( + ( + "scorers,expected_predict_count," + "expected_predict_proba_count,expected_decision_func_count" + ), + [ + ( + { + "a1": "accuracy", + "a2": "accuracy", + "ll1": "neg_log_loss", + "ll2": "neg_log_loss", + "ra1": "roc_auc", + "ra2": "roc_auc", + }, + 1, + 1, + 1, + ), + (["roc_auc", "accuracy"], 1, 0, 1), + (["neg_log_loss", "accuracy"], 1, 1, 0), + ], +) +def test_multimetric_scorer_calls_method_once( + scorers, + expected_predict_count, + expected_predict_proba_count, + expected_decision_func_count, +): + X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0]) + + mock_est = Mock() + mock_est._estimator_type = "classifier" + fit_func = Mock(return_value=mock_est, name="fit") + fit_func.__name__ = "fit" + predict_func = Mock(return_value=y, name="predict") + predict_func.__name__ = "predict" + + pos_proba = np.random.rand(X.shape[0]) + proba = np.c_[1 - pos_proba, pos_proba] + predict_proba_func = Mock(return_value=proba, name="predict_proba") + predict_proba_func.__name__ = "predict_proba" + decision_function_func = Mock(return_value=pos_proba, name="decision_function") + decision_function_func.__name__ = "decision_function" + + mock_est.fit = fit_func + mock_est.predict = predict_func + mock_est.predict_proba = predict_proba_func + mock_est.decision_function = decision_function_func + # add the classes that would be found during fit + mock_est.classes_ = np.array([0, 1]) + + scorer_dict = _check_multimetric_scoring(LogisticRegression(), scorers) + multi_scorer = _MultimetricScorer(scorers=scorer_dict) + results = multi_scorer(mock_est, X, y) + + assert set(scorers) == set(results) # compare dict keys + + assert predict_func.call_count == expected_predict_count + assert predict_proba_func.call_count == expected_predict_proba_count + assert decision_function_func.call_count == expected_decision_func_count + + +@pytest.mark.parametrize( + "scorers", + [ + (["roc_auc", "neg_log_loss"]), + ( + { + "roc_auc": make_scorer( + roc_auc_score, + response_method=["predict_proba", "decision_function"], + ), + "neg_log_loss": make_scorer(log_loss, response_method="predict_proba"), + } + ), + ], +) +def test_multimetric_scorer_calls_method_once_classifier_no_decision(scorers): + predict_proba_call_cnt = 0 + + class MockKNeighborsClassifier(KNeighborsClassifier): + def predict_proba(self, X): + nonlocal predict_proba_call_cnt + predict_proba_call_cnt += 1 + return super().predict_proba(X) + 
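+ # The subclass above only counts predict_proba calls; the final assertion relies
+ # on the multimetric scorer computing the probabilities once and reusing them
+ # for both scorers.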
+ X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0]) + + # no decision function + clf = MockKNeighborsClassifier(n_neighbors=1) + clf.fit(X, y) + + scorer_dict = _check_multimetric_scoring(clf, scorers) + scorer = _MultimetricScorer(scorers=scorer_dict) + scorer(clf, X, y) + + assert predict_proba_call_cnt == 1 + + +def test_multimetric_scorer_calls_method_once_regressor_threshold(): + predict_called_cnt = 0 + + class MockDecisionTreeRegressor(DecisionTreeRegressor): + def predict(self, X): + nonlocal predict_called_cnt + predict_called_cnt += 1 + return super().predict(X) + + X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0]) + + # no decision function + clf = MockDecisionTreeRegressor() + clf.fit(X, y) + + scorers = {"neg_mse": "neg_mean_squared_error", "r2": "r2"} + scorer_dict = _check_multimetric_scoring(clf, scorers) + scorer = _MultimetricScorer(scorers=scorer_dict) + scorer(clf, X, y) + + assert predict_called_cnt == 1 + + +def test_multimetric_scorer_sanity_check(): + # scoring dictionary returned is the same as calling each scorer separately + scorers = { + "a1": "accuracy", + "a2": "accuracy", + "ll1": "neg_log_loss", + "ll2": "neg_log_loss", + "ra1": "roc_auc", + "ra2": "roc_auc", + } + + X, y = make_classification(random_state=0) + + clf = DecisionTreeClassifier() + clf.fit(X, y) + + scorer_dict = _check_multimetric_scoring(clf, scorers) + multi_scorer = _MultimetricScorer(scorers=scorer_dict) + + result = multi_scorer(clf, X, y) + + separate_scores = { + name: get_scorer(name)(clf, X, y) + for name in ["accuracy", "neg_log_loss", "roc_auc"] + } + + for key, value in result.items(): + score_name = scorers[key] + assert_allclose(value, separate_scores[score_name]) + + +@pytest.mark.parametrize("raise_exc", [True, False]) +def test_multimetric_scorer_exception_handling(raise_exc): + """Check that the calling of the `_MultimetricScorer` returns + exception messages in the result dict for the failing scorers + in case of `raise_exc` is `False` and if `raise_exc` is `True`, + then the proper exception is raised. + """ + scorers = { + "failing_1": "neg_mean_squared_log_error", + "non_failing": "neg_median_absolute_error", + "failing_2": "neg_mean_squared_log_error", + } + + X, y = make_classification( + n_samples=50, n_features=2, n_redundant=0, random_state=0 + ) + y *= -1 # neg_mean_squared_log_error fails if y contains negative values + + clf = DecisionTreeClassifier().fit(X, y) + + scorer_dict = _check_multimetric_scoring(clf, scorers) + multi_scorer = _MultimetricScorer(scorers=scorer_dict, raise_exc=raise_exc) + + error_msg = ( + "Mean Squared Logarithmic Error cannot be used when targets contain" + " negative values." 
+ ) + + if raise_exc: + with pytest.raises(ValueError, match=error_msg): + multi_scorer(clf, X, y) + else: + result = multi_scorer(clf, X, y) + + exception_message_1 = result["failing_1"] + score = result["non_failing"] + exception_message_2 = result["failing_2"] + + assert isinstance(exception_message_1, str) and error_msg in exception_message_1 + assert isinstance(score, float) + assert isinstance(exception_message_2, str) and error_msg in exception_message_2 + + +@pytest.mark.parametrize( + "scorer_name, metric", + [ + ("roc_auc_ovr", partial(roc_auc_score, multi_class="ovr")), + ("roc_auc_ovo", partial(roc_auc_score, multi_class="ovo")), + ( + "roc_auc_ovr_weighted", + partial(roc_auc_score, multi_class="ovr", average="weighted"), + ), + ( + "roc_auc_ovo_weighted", + partial(roc_auc_score, multi_class="ovo", average="weighted"), + ), + ], +) +def test_multiclass_roc_proba_scorer(scorer_name, metric): + scorer = get_scorer(scorer_name) + X, y = make_classification( + n_classes=3, n_informative=3, n_samples=20, random_state=0 + ) + lr = LogisticRegression(multi_class="multinomial").fit(X, y) + y_proba = lr.predict_proba(X) + expected_score = metric(y, y_proba) + + assert scorer(lr, X, y) == pytest.approx(expected_score) + + +def test_multiclass_roc_proba_scorer_label(): + scorer = make_scorer( + roc_auc_score, + multi_class="ovo", + labels=[0, 1, 2], + response_method="predict_proba", + ) + X, y = make_classification( + n_classes=3, n_informative=3, n_samples=20, random_state=0 + ) + lr = LogisticRegression(multi_class="multinomial").fit(X, y) + y_proba = lr.predict_proba(X) + + y_binary = y == 0 + expected_score = roc_auc_score( + y_binary, y_proba, multi_class="ovo", labels=[0, 1, 2] + ) + + assert scorer(lr, X, y_binary) == pytest.approx(expected_score) + + +@pytest.mark.parametrize( + "scorer_name", + ["roc_auc_ovr", "roc_auc_ovo", "roc_auc_ovr_weighted", "roc_auc_ovo_weighted"], +) +def test_multiclass_roc_no_proba_scorer_errors(scorer_name): + # Perceptron has no predict_proba + scorer = get_scorer(scorer_name) + X, y = make_classification( + n_classes=3, n_informative=3, n_samples=20, random_state=0 + ) + lr = Perceptron().fit(X, y) + msg = "Perceptron has none of the following attributes: predict_proba." + with pytest.raises(AttributeError, match=msg): + scorer(lr, X, y) + + +@pytest.fixture +def string_labeled_classification_problem(): + """Train a classifier on binary problem with string target. + + The classifier is trained on a binary classification problem where the + minority class of interest has a string label that is intentionally not the + greatest class label using the lexicographic order. In this case, "cancer" + is the positive label, and `classifier.classes_` is + `["cancer", "not cancer"]`. + + In addition, the dataset is imbalanced to better identify problems when + using non-symmetric performance metrics such as f1-score, average precision + and so on. + + Returns + ------- + classifier : estimator object + Trained classifier on the binary problem. + X_test : ndarray of shape (n_samples, n_features) + Data to be used as testing set in tests. + y_test : ndarray of shape (n_samples,), dtype=object + Binary target where labels are strings. + y_pred : ndarray of shape (n_samples,), dtype=object + Prediction of `classifier` when predicting for `X_test`. + y_pred_proba : ndarray of shape (n_samples, 2), dtype=np.float64 + Probabilities of `classifier` when predicting for `X_test`. 
+ y_pred_decision : ndarray of shape (n_samples,), dtype=np.float64 + Decision function values of `classifier` when predicting on `X_test`. + """ + from sklearn.datasets import load_breast_cancer + from sklearn.utils import shuffle + + X, y = load_breast_cancer(return_X_y=True) + # create an highly imbalanced classification task + idx_positive = np.flatnonzero(y == 1) + idx_negative = np.flatnonzero(y == 0) + idx_selected = np.hstack([idx_negative, idx_positive[:25]]) + X, y = X[idx_selected], y[idx_selected] + X, y = shuffle(X, y, random_state=42) + # only use 2 features to make the problem even harder + X = X[:, :2] + y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object) + X_train, X_test, y_train, y_test = train_test_split( + X, + y, + stratify=y, + random_state=0, + ) + classifier = LogisticRegression().fit(X_train, y_train) + y_pred = classifier.predict(X_test) + y_pred_proba = classifier.predict_proba(X_test) + y_pred_decision = classifier.decision_function(X_test) + + return classifier, X_test, y_test, y_pred, y_pred_proba, y_pred_decision + + +def test_average_precision_pos_label(string_labeled_classification_problem): + # check that _Scorer will lead to the right score when passing + # `pos_label`. Currently, only `average_precision_score` is defined to + # be such a scorer. + ( + clf, + X_test, + y_test, + _, + y_pred_proba, + y_pred_decision, + ) = string_labeled_classification_problem + + pos_label = "cancer" + # we need to select the positive column or reverse the decision values + y_pred_proba = y_pred_proba[:, 0] + y_pred_decision = y_pred_decision * -1 + assert clf.classes_[0] == pos_label + + # check that when calling the scoring function, probability estimates and + # decision values lead to the same results + ap_proba = average_precision_score(y_test, y_pred_proba, pos_label=pos_label) + ap_decision_function = average_precision_score( + y_test, y_pred_decision, pos_label=pos_label + ) + assert ap_proba == pytest.approx(ap_decision_function) + + # create a scorer which would require to pass a `pos_label` + # check that it fails if `pos_label` is not provided + average_precision_scorer = make_scorer( + average_precision_score, + response_method=("decision_function", "predict_proba"), + ) + err_msg = "pos_label=1 is not a valid label. It should be one of " + with pytest.raises(ValueError, match=err_msg): + average_precision_scorer(clf, X_test, y_test) + + # otherwise, the scorer should give the same results than calling the + # scoring function + average_precision_scorer = make_scorer( + average_precision_score, + response_method=("decision_function", "predict_proba"), + pos_label=pos_label, + ) + ap_scorer = average_precision_scorer(clf, X_test, y_test) + + assert ap_scorer == pytest.approx(ap_proba) + + # The above scorer call is using `clf.decision_function`. We will force + # it to use `clf.predict_proba`. + clf_without_predict_proba = deepcopy(clf) + + def _predict_proba(self, X): + raise NotImplementedError + + clf_without_predict_proba.predict_proba = partial( + _predict_proba, clf_without_predict_proba + ) + # sanity check + with pytest.raises(NotImplementedError): + clf_without_predict_proba.predict_proba(X_test) + + ap_scorer = average_precision_scorer(clf_without_predict_proba, X_test, y_test) + assert ap_scorer == pytest.approx(ap_proba) + + +def test_brier_score_loss_pos_label(string_labeled_classification_problem): + # check that _Scorer leads to the right score when `pos_label` is + # provided. 
Currently only the `brier_score_loss` is defined to be such + # a scorer. + clf, X_test, y_test, _, y_pred_proba, _ = string_labeled_classification_problem + + pos_label = "cancer" + assert clf.classes_[0] == pos_label + + # brier score loss is symmetric + brier_pos_cancer = brier_score_loss(y_test, y_pred_proba[:, 0], pos_label="cancer") + brier_pos_not_cancer = brier_score_loss( + y_test, y_pred_proba[:, 1], pos_label="not cancer" + ) + assert brier_pos_cancer == pytest.approx(brier_pos_not_cancer) + + brier_scorer = make_scorer( + brier_score_loss, + response_method="predict_proba", + pos_label=pos_label, + ) + assert brier_scorer(clf, X_test, y_test) == pytest.approx(brier_pos_cancer) + + +@pytest.mark.parametrize( + "score_func", [f1_score, precision_score, recall_score, jaccard_score] +) +def test_non_symmetric_metric_pos_label( + score_func, string_labeled_classification_problem +): + # check that _Scorer leads to the right score when `pos_label` is + # provided. We check for all possible metric supported. + # Note: At some point we may end up having "scorer tags". + clf, X_test, y_test, y_pred, _, _ = string_labeled_classification_problem + + pos_label = "cancer" + assert clf.classes_[0] == pos_label + + score_pos_cancer = score_func(y_test, y_pred, pos_label="cancer") + score_pos_not_cancer = score_func(y_test, y_pred, pos_label="not cancer") + + assert score_pos_cancer != pytest.approx(score_pos_not_cancer) + + scorer = make_scorer(score_func, pos_label=pos_label) + assert scorer(clf, X_test, y_test) == pytest.approx(score_pos_cancer) + + +@pytest.mark.parametrize( + "scorer", + [ + make_scorer( + average_precision_score, + response_method=("decision_function", "predict_proba"), + pos_label="xxx", + ), + make_scorer(brier_score_loss, response_method="predict_proba", pos_label="xxx"), + make_scorer(f1_score, pos_label="xxx"), + ], + ids=["non-thresholded scorer", "probability scorer", "thresholded scorer"], +) +def test_scorer_select_proba_error(scorer): + # check that we raise the proper error when passing an unknown + # pos_label + X, y = make_classification( + n_classes=2, n_informative=3, n_samples=20, random_state=0 + ) + lr = LogisticRegression().fit(X, y) + assert scorer._kwargs["pos_label"] not in np.unique(y).tolist() + + err_msg = "is not a valid label" + with pytest.raises(ValueError, match=err_msg): + scorer(lr, X, y) + + +def test_get_scorer_return_copy(): + # test that get_scorer returns a copy + assert get_scorer("roc_auc") is not get_scorer("roc_auc") + + +def test_scorer_no_op_multiclass_select_proba(): + # check that calling a _Scorer on a multiclass problem do not raise + # even if `y_true` would be binary during the scoring. + # `_select_proba_binary` should not be called in this case. + X, y = make_classification( + n_classes=3, n_informative=3, n_samples=20, random_state=0 + ) + lr = LogisticRegression().fit(X, y) + + mask_last_class = y == lr.classes_[-1] + X_test, y_test = X[~mask_last_class], y[~mask_last_class] + assert_array_equal(np.unique(y_test), lr.classes_[:-1]) + + scorer = make_scorer( + roc_auc_score, + response_method="predict_proba", + multi_class="ovo", + labels=lr.classes_, + ) + scorer(lr, X_test, y_test) + + +@pytest.mark.parametrize("name", get_scorer_names()) +def test_scorer_set_score_request_raises(name): + """Test that set_score_request is only available when feature flag is on.""" + # Make sure they expose the routing methods. 
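+ # set_score_request is gated behind metadata routing (enabled via the
+ # enable_slep006 fixture in the tests below); without it the call must raise.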
+ scorer = get_scorer(name) + with pytest.raises(RuntimeError, match="This method is only available"): + scorer.set_score_request() + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize("name", get_scorer_names(), ids=get_scorer_names()) +def test_scorer_metadata_request(name): + """Testing metadata requests for scorers. + + This test checks many small things in a large test, to reduce the + boilerplate required for each section. + """ + # Make sure they expose the routing methods. + scorer = get_scorer(name) + assert hasattr(scorer, "set_score_request") + assert hasattr(scorer, "get_metadata_routing") + + # Check that by default no metadata is requested. + assert_request_is_empty(scorer.get_metadata_routing()) + + weighted_scorer = scorer.set_score_request(sample_weight=True) + # set_score_request should mutate the instance, rather than returning a + # new instance + assert weighted_scorer is scorer + + # make sure the scorer doesn't request anything on methods other than + # `score`, and that the requested value on `score` is correct. + assert_request_is_empty(weighted_scorer.get_metadata_routing(), exclude="score") + assert ( + weighted_scorer.get_metadata_routing().score.requests["sample_weight"] is True + ) + + # make sure putting the scorer in a router doesn't request anything by + # default + router = MetadataRouter(owner="test").add( + method_mapping="score", scorer=get_scorer(name) + ) + # make sure `sample_weight` is refused if passed. + with pytest.raises(TypeError, match="got unexpected argument"): + router.validate_metadata(params={"sample_weight": 1}, method="score") + # make sure `sample_weight` is not routed even if passed. + routed_params = router.route_params(params={"sample_weight": 1}, caller="score") + assert not routed_params.scorer.score + + # make sure putting weighted_scorer in a router requests sample_weight + router = MetadataRouter(owner="test").add( + scorer=weighted_scorer, method_mapping="score" + ) + router.validate_metadata(params={"sample_weight": 1}, method="score") + routed_params = router.route_params(params={"sample_weight": 1}, caller="score") + assert list(routed_params.scorer.score.keys()) == ["sample_weight"] + + +@pytest.mark.usefixtures("enable_slep006") +def test_metadata_kwarg_conflict(): + """This test makes sure the right warning is raised if the user passes + some metadata both as a constructor to make_scorer, and during __call__. + """ + X, y = make_classification( + n_classes=3, n_informative=3, n_samples=20, random_state=0 + ) + lr = LogisticRegression().fit(X, y) + + scorer = make_scorer( + roc_auc_score, + response_method="predict_proba", + multi_class="ovo", + labels=lr.classes_, + ) + with pytest.warns(UserWarning, match="already set as kwargs"): + scorer.set_score_request(labels=True) + + with pytest.warns(UserWarning, match="There is an overlap"): + scorer(lr, X, y, labels=lr.classes_) + + +@pytest.mark.usefixtures("enable_slep006") +def test_PassthroughScorer_metadata_request(): + """Test that _PassthroughScorer properly routes metadata. + + _PassthroughScorer should behave like a consumer, mirroring whatever is the + underlying score method. + """ + scorer = _PassthroughScorer( + estimator=LinearSVC() + .set_score_request(sample_weight="alias") + .set_fit_request(sample_weight=True) + ) + # Test that _PassthroughScorer doesn't change estimator's routing. 
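+ # The expected routing below mirrors exactly what was configured on the wrapped
+ # LinearSVC: set_fit_request(sample_weight=True) and
+ # set_score_request(sample_weight="alias").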
+ assert_request_equal( + scorer.get_metadata_routing(), + {"fit": {"sample_weight": True}, "score": {"sample_weight": "alias"}}, + ) + + +@pytest.mark.usefixtures("enable_slep006") +def test_multimetric_scoring_metadata_routing(): + # Test that _MultimetricScorer properly routes metadata. + def score1(y_true, y_pred): + return 1 + + def score2(y_true, y_pred, sample_weight="test"): + # make sure sample_weight is not passed + assert sample_weight == "test" + return 1 + + def score3(y_true, y_pred, sample_weight=None): + # make sure sample_weight is passed + assert sample_weight is not None + return 1 + + scorers = { + "score1": make_scorer(score1), + "score2": make_scorer(score2).set_score_request(sample_weight=False), + "score3": make_scorer(score3).set_score_request(sample_weight=True), + } + + X, y = make_classification( + n_samples=50, n_features=2, n_redundant=0, random_state=0 + ) + + clf = DecisionTreeClassifier().fit(X, y) + + scorer_dict = _check_multimetric_scoring(clf, scorers) + multi_scorer = _MultimetricScorer(scorers=scorer_dict) + # this should fail, because metadata routing is not enabled and w/o it we + # don't support different metadata for different scorers. + # TODO: remove when enable_metadata_routing is deprecated + with config_context(enable_metadata_routing=False): + with pytest.raises(TypeError, match="got an unexpected keyword argument"): + multi_scorer(clf, X, y, sample_weight=1) + + # This passes since routing is done. + multi_scorer(clf, X, y, sample_weight=1) + + +def test_kwargs_without_metadata_routing_error(): + # Test that kwargs are not supported in scorers if metadata routing is not + # enabled. + # TODO: remove when enable_metadata_routing is deprecated + def score(y_true, y_pred, param=None): + return 1 # pragma: no cover + + X, y = make_classification( + n_samples=50, n_features=2, n_redundant=0, random_state=0 + ) + + clf = DecisionTreeClassifier().fit(X, y) + scorer = make_scorer(score) + with config_context(enable_metadata_routing=False): + with pytest.raises( + ValueError, match="is only supported if enable_metadata_routing=True" + ): + scorer(clf, X, y, param="blah") + + +def test_get_scorer_multilabel_indicator(): + """Check that our scorer deal with multi-label indicator matrices. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/26817 + """ + X, Y = make_multilabel_classification(n_samples=72, n_classes=3, random_state=0) + X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0) + + estimator = KNeighborsClassifier().fit(X_train, Y_train) + + score = get_scorer("average_precision")(estimator, X_test, Y_test) + assert score > 0.8 + + +@pytest.mark.parametrize( + "scorer, expected_repr", + [ + ( + get_scorer("accuracy"), + "make_scorer(accuracy_score, response_method='predict')", + ), + ( + get_scorer("neg_log_loss"), + ( + "make_scorer(log_loss, greater_is_better=False," + " response_method='predict_proba')" + ), + ), + ( + get_scorer("roc_auc"), + ( + "make_scorer(roc_auc_score, response_method=" + "('decision_function', 'predict_proba'))" + ), + ), + ( + make_scorer(fbeta_score, beta=2), + "make_scorer(fbeta_score, response_method='predict', beta=2)", + ), + ], +) +def test_make_scorer_repr(scorer, expected_repr): + """Check the representation of the scorer.""" + assert repr(scorer) == expected_repr + + +# TODO(1.6): rework this test after the deprecation of `needs_proba` and +# `needs_threshold` +@pytest.mark.filterwarnings("ignore:.*needs_proba.*:FutureWarning") +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + # response_method should not be set if needs_* are set + ( + {"response_method": "predict_proba", "needs_proba": True}, + ValueError, + "You cannot set both `response_method`", + ), + ( + {"response_method": "predict_proba", "needs_threshold": True}, + ValueError, + "You cannot set both `response_method`", + ), + # cannot set both needs_proba and needs_threshold + ( + {"needs_proba": True, "needs_threshold": True}, + ValueError, + "You cannot set both `needs_proba` and `needs_threshold`", + ), + ], +) +def test_make_scorer_error(params, err_type, err_msg): + """Check that `make_scorer` raises errors if the parameter used.""" + with pytest.raises(err_type, match=err_msg): + make_scorer(lambda y_true, y_pred: 1, **params) + + +# TODO(1.6): remove the following test +@pytest.mark.parametrize( + "deprecated_params, new_params, warn_msg", + [ + ( + {"needs_proba": True}, + {"response_method": "predict_proba"}, + "The `needs_threshold` and `needs_proba` parameter are deprecated", + ), + ( + {"needs_proba": True, "needs_threshold": False}, + {"response_method": "predict_proba"}, + "The `needs_threshold` and `needs_proba` parameter are deprecated", + ), + ( + {"needs_threshold": True}, + {"response_method": ("decision_function", "predict_proba")}, + "The `needs_threshold` and `needs_proba` parameter are deprecated", + ), + ( + {"needs_threshold": True, "needs_proba": False}, + {"response_method": ("decision_function", "predict_proba")}, + "The `needs_threshold` and `needs_proba` parameter are deprecated", + ), + ( + {"needs_threshold": False, "needs_proba": False}, + {"response_method": "predict"}, + "The `needs_threshold` and `needs_proba` parameter are deprecated", + ), + ], +) +def test_make_scorer_deprecation(deprecated_params, new_params, warn_msg): + """Check that we raise a deprecation warning when using `needs_proba` or + `needs_threshold`.""" + X, y = make_classification(n_samples=150, n_features=10, random_state=0) + classifier = LogisticRegression().fit(X, y) + + # check deprecation of needs_proba + with pytest.warns(FutureWarning, match=warn_msg): + deprecated_roc_auc_scorer = make_scorer(roc_auc_score, **deprecated_params) + roc_auc_scorer = make_scorer(roc_auc_score, **new_params) + + 
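+ # Besides emitting the FutureWarning, the deprecated spelling and its
+ # response_method replacement are expected to produce identical scores, as
+ # asserted next.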
assert deprecated_roc_auc_scorer(classifier, X, y) == pytest.approx( + roc_auc_scorer(classifier, X, y) + ) + + +@pytest.mark.parametrize("enable_metadata_routing", [True, False]) +def test_metadata_routing_multimetric_metadata_routing(enable_metadata_routing): + """Test multimetric scorer works with and without metadata routing enabled when + there is no actual metadata to pass. + + Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28256 + """ + X, y = make_classification(n_samples=50, n_features=10, random_state=0) + estimator = EstimatorWithFitAndPredict().fit(X, y) + + multimetric_scorer = _MultimetricScorer(scorers={"acc": get_scorer("accuracy")}) + with config_context(enable_metadata_routing=enable_metadata_routing): + multimetric_scorer(estimator, X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/__init__.py b/venv/lib/python3.10/site-packages/sklearn/mixture/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0018196ffc986e82f2cc0f20c25b7d6bc13942b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/mixture/__init__.py @@ -0,0 +1,8 @@ +""" +The :mod:`sklearn.mixture` module implements mixture modeling algorithms. +""" + +from ._bayesian_mixture import BayesianGaussianMixture +from ._gaussian_mixture import GaussianMixture + +__all__ = ["GaussianMixture", "BayesianGaussianMixture"] diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cccd896f13f1e5a2221e93bda68e102424c31e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9008093f2173192987e6beb74a3aa7e640e01fe6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3af1ad87d6f1516ac0965310d2fb3a8d69a6d125 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9d3a91fa6fa0d2ebca9ae6d8ee98fb004f0ec7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/_base.py b/venv/lib/python3.10/site-packages/sklearn/mixture/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..9fb1c232c1012459a381a5e575fe643622cfcad5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/mixture/_base.py @@ -0,0 +1,560 @@ +"""Base class for mixture models.""" + +# Author: Wei Xue +# Modified by Thierry 
Guillemot +# License: BSD 3 clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real +from time import time + +import numpy as np +from scipy.special import logsumexp + +from .. import cluster +from ..base import BaseEstimator, DensityMixin, _fit_context +from ..cluster import kmeans_plusplus +from ..exceptions import ConvergenceWarning +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.validation import check_is_fitted + + +def _check_shape(param, param_shape, name): + """Validate the shape of the input parameter 'param'. + + Parameters + ---------- + param : array + + param_shape : tuple + + name : str + """ + param = np.array(param) + if param.shape != param_shape: + raise ValueError( + "The parameter '%s' should have the shape of %s, but got %s" + % (name, param_shape, param.shape) + ) + + +class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for mixture models. + + This abstract class specifies an interface for all mixture classes and + provides basic common methods for mixture models. + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "reg_covar": [Interval(Real, 0.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "n_init": [Interval(Integral, 1, None, closed="left")], + "init_params": [ + StrOptions({"kmeans", "random", "random_from_data", "k-means++"}) + ], + "random_state": ["random_state"], + "warm_start": ["boolean"], + "verbose": ["verbose"], + "verbose_interval": [Interval(Integral, 1, None, closed="left")], + } + + def __init__( + self, + n_components, + tol, + reg_covar, + max_iter, + n_init, + init_params, + random_state, + warm_start, + verbose, + verbose_interval, + ): + self.n_components = n_components + self.tol = tol + self.reg_covar = reg_covar + self.max_iter = max_iter + self.n_init = n_init + self.init_params = init_params + self.random_state = random_state + self.warm_start = warm_start + self.verbose = verbose + self.verbose_interval = verbose_interval + + @abstractmethod + def _check_parameters(self, X): + """Check initial parameters of the derived class. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + pass + + def _initialize_parameters(self, X, random_state): + """Initialize the model parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + random_state : RandomState + A random number generator instance that controls the random seed + used for the method chosen to initialize the parameters. 
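+
+ Depending on ``init_params``, the responsibilities are seeded from k-means
+ labels, uniform random values, randomly chosen data points or k-means++
+ centers before being handed to ``_initialize``.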
+ """ + n_samples, _ = X.shape + + if self.init_params == "kmeans": + resp = np.zeros((n_samples, self.n_components)) + label = ( + cluster.KMeans( + n_clusters=self.n_components, n_init=1, random_state=random_state + ) + .fit(X) + .labels_ + ) + resp[np.arange(n_samples), label] = 1 + elif self.init_params == "random": + resp = random_state.uniform(size=(n_samples, self.n_components)) + resp /= resp.sum(axis=1)[:, np.newaxis] + elif self.init_params == "random_from_data": + resp = np.zeros((n_samples, self.n_components)) + indices = random_state.choice( + n_samples, size=self.n_components, replace=False + ) + resp[indices, np.arange(self.n_components)] = 1 + elif self.init_params == "k-means++": + resp = np.zeros((n_samples, self.n_components)) + _, indices = kmeans_plusplus( + X, + self.n_components, + random_state=random_state, + ) + resp[indices, np.arange(self.n_components)] = 1 + + self._initialize(X, resp) + + @abstractmethod + def _initialize(self, X, resp): + """Initialize the model parameters of the derived class. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + pass + + def fit(self, X, y=None): + """Estimate model parameters with the EM algorithm. + + The method fits the model ``n_init`` times and sets the parameters with + which the model has the largest likelihood or lower bound. Within each + trial, the method iterates between E-step and M-step for ``max_iter`` + times until the change of likelihood or lower bound is less than + ``tol``, otherwise, a ``ConvergenceWarning`` is raised. + If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single + initialization is performed upon the first call. Upon consecutive + calls, training starts where it left off. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + The fitted mixture. + """ + # parameters are validated in fit_predict + self.fit_predict(X, y) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_predict(self, X, y=None): + """Estimate model parameters using X and predict the labels for X. + + The method fits the model n_init times and sets the parameters with + which the model has the largest likelihood or lower bound. Within each + trial, the method iterates between E-step and M-step for `max_iter` + times until the change of likelihood or lower bound is less than + `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is + raised. After fitting, it predicts the most probable label for the + input data points. + + .. versionadded:: 0.20 + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + labels : array, shape (n_samples,) + Component labels. 
+ """ + X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_min_samples=2) + if X.shape[0] < self.n_components: + raise ValueError( + "Expected n_samples >= n_components " + f"but got n_components = {self.n_components}, " + f"n_samples = {X.shape[0]}" + ) + self._check_parameters(X) + + # if we enable warm_start, we will have a unique initialisation + do_init = not (self.warm_start and hasattr(self, "converged_")) + n_init = self.n_init if do_init else 1 + + max_lower_bound = -np.inf + self.converged_ = False + + random_state = check_random_state(self.random_state) + + n_samples, _ = X.shape + for init in range(n_init): + self._print_verbose_msg_init_beg(init) + + if do_init: + self._initialize_parameters(X, random_state) + + lower_bound = -np.inf if do_init else self.lower_bound_ + + if self.max_iter == 0: + best_params = self._get_parameters() + best_n_iter = 0 + else: + for n_iter in range(1, self.max_iter + 1): + prev_lower_bound = lower_bound + + log_prob_norm, log_resp = self._e_step(X) + self._m_step(X, log_resp) + lower_bound = self._compute_lower_bound(log_resp, log_prob_norm) + + change = lower_bound - prev_lower_bound + self._print_verbose_msg_iter_end(n_iter, change) + + if abs(change) < self.tol: + self.converged_ = True + break + + self._print_verbose_msg_init_end(lower_bound) + + if lower_bound > max_lower_bound or max_lower_bound == -np.inf: + max_lower_bound = lower_bound + best_params = self._get_parameters() + best_n_iter = n_iter + + # Should only warn about convergence if max_iter > 0, otherwise + # the user is assumed to have used 0-iters initialization + # to get the initial means. + if not self.converged_ and self.max_iter > 0: + warnings.warn( + "Initialization %d did not converge. " + "Try different init parameters, " + "or increase max_iter, tol " + "or check for degenerate data." % (init + 1), + ConvergenceWarning, + ) + + self._set_parameters(best_params) + self.n_iter_ = best_n_iter + self.lower_bound_ = max_lower_bound + + # Always do a final e-step to guarantee that the labels returned by + # fit_predict(X) are always consistent with fit(X).predict(X) + # for any value of max_iter and tol (and any random_state). + _, log_resp = self._e_step(X) + + return log_resp.argmax(axis=1) + + def _e_step(self, X): + """E step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob_norm : float + Mean of the logarithms of the probabilities of each sample in X + + log_responsibility : array, shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + log_prob_norm, log_resp = self._estimate_log_prob_resp(X) + return np.mean(log_prob_norm), log_resp + + @abstractmethod + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + pass + + @abstractmethod + def _get_parameters(self): + pass + + @abstractmethod + def _set_parameters(self, params): + pass + + def score_samples(self, X): + """Compute the log-likelihood of each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. 
+ + Returns + ------- + log_prob : array, shape (n_samples,) + Log-likelihood of each sample in `X` under the current model. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + + return logsumexp(self._estimate_weighted_log_prob(X), axis=1) + + def score(self, X, y=None): + """Compute the per-sample average log-likelihood of the given data X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_dimensions) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + log_likelihood : float + Log-likelihood of `X` under the Gaussian mixture model. + """ + return self.score_samples(X).mean() + + def predict(self, X): + """Predict the labels for the data samples in X using trained model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + Returns + ------- + labels : array, shape (n_samples,) + Component labels. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + return self._estimate_weighted_log_prob(X).argmax(axis=1) + + def predict_proba(self, X): + """Evaluate the components' density for each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + Returns + ------- + resp : array, shape (n_samples, n_components) + Density of each Gaussian component for each sample in X. + """ + check_is_fitted(self) + X = self._validate_data(X, reset=False) + _, log_resp = self._estimate_log_prob_resp(X) + return np.exp(log_resp) + + def sample(self, n_samples=1): + """Generate random samples from the fitted Gaussian distribution. + + Parameters + ---------- + n_samples : int, default=1 + Number of samples to generate. + + Returns + ------- + X : array, shape (n_samples, n_features) + Randomly generated sample. + + y : array, shape (nsamples,) + Component labels. + """ + check_is_fitted(self) + + if n_samples < 1: + raise ValueError( + "Invalid value for 'n_samples': %d . The sampling requires at " + "least one sample." % (self.n_components) + ) + + _, n_features = self.means_.shape + rng = check_random_state(self.random_state) + n_samples_comp = rng.multinomial(n_samples, self.weights_) + + if self.covariance_type == "full": + X = np.vstack( + [ + rng.multivariate_normal(mean, covariance, int(sample)) + for (mean, covariance, sample) in zip( + self.means_, self.covariances_, n_samples_comp + ) + ] + ) + elif self.covariance_type == "tied": + X = np.vstack( + [ + rng.multivariate_normal(mean, self.covariances_, int(sample)) + for (mean, sample) in zip(self.means_, n_samples_comp) + ] + ) + else: + X = np.vstack( + [ + mean + + rng.standard_normal(size=(sample, n_features)) + * np.sqrt(covariance) + for (mean, covariance, sample) in zip( + self.means_, self.covariances_, n_samples_comp + ) + ] + ) + + y = np.concatenate( + [np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)] + ) + + return (X, y) + + def _estimate_weighted_log_prob(self, X): + """Estimate the weighted log-probabilities, log P(X | Z) + log weights. 
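+
+ This is the unnormalized log joint that feeds the responsibilities:
+ log p(x_i, z_i = k) = log p(x_i | z_i = k) + log pi_k
+ (with E[log pi_k] taking the place of log pi_k in the variational case).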
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + weighted_log_prob : array, shape (n_samples, n_component) + """ + return self._estimate_log_prob(X) + self._estimate_log_weights() + + @abstractmethod + def _estimate_log_weights(self): + """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm. + + Returns + ------- + log_weight : array, shape (n_components, ) + """ + pass + + @abstractmethod + def _estimate_log_prob(self, X): + """Estimate the log-probabilities log P(X | Z). + + Compute the log-probabilities per each component for each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob : array, shape (n_samples, n_component) + """ + pass + + def _estimate_log_prob_resp(self, X): + """Estimate log probabilities and responsibilities for each sample. + + Compute the log probabilities, weighted log probabilities per + component and responsibilities for each sample in X with respect to + the current state of the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob_norm : array, shape (n_samples,) + log p(X) + + log_responsibilities : array, shape (n_samples, n_components) + logarithm of the responsibilities + """ + weighted_log_prob = self._estimate_weighted_log_prob(X) + log_prob_norm = logsumexp(weighted_log_prob, axis=1) + with np.errstate(under="ignore"): + # ignore underflow + log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis] + return log_prob_norm, log_resp + + def _print_verbose_msg_init_beg(self, n_init): + """Print verbose message on initialization.""" + if self.verbose == 1: + print("Initialization %d" % n_init) + elif self.verbose >= 2: + print("Initialization %d" % n_init) + self._init_prev_time = time() + self._iter_prev_time = self._init_prev_time + + def _print_verbose_msg_iter_end(self, n_iter, diff_ll): + """Print verbose message on initialization.""" + if n_iter % self.verbose_interval == 0: + if self.verbose == 1: + print(" Iteration %d" % n_iter) + elif self.verbose >= 2: + cur_time = time() + print( + " Iteration %d\t time lapse %.5fs\t ll change %.5f" + % (n_iter, cur_time - self._iter_prev_time, diff_ll) + ) + self._iter_prev_time = cur_time + + def _print_verbose_msg_init_end(self, ll): + """Print verbose message on the end of iteration.""" + if self.verbose == 1: + print("Initialization converged: %s" % self.converged_) + elif self.verbose >= 2: + print( + "Initialization converged: %s\t time lapse %.5fs\t ll %.5f" + % (self.converged_, time() - self._init_prev_time, ll) + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py b/venv/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..f4169b3e1f4ee847d5963c812950e2e9273268e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py @@ -0,0 +1,888 @@ +"""Bayesian Gaussian Mixture Model.""" +# Author: Wei Xue +# Thierry Guillemot +# License: BSD 3 clause + +import math +from numbers import Real + +import numpy as np +from scipy.special import betaln, digamma, gammaln + +from ..utils import check_array +from ..utils._param_validation import Interval, StrOptions +from ._base import BaseMixture, _check_shape +from ._gaussian_mixture import ( + _check_precision_matrix, + _check_precision_positivity, + _compute_log_det_cholesky, + _compute_precision_cholesky, + 
_estimate_gaussian_parameters, + _estimate_log_gaussian_prob, +) + + +def _log_dirichlet_norm(dirichlet_concentration): + """Compute the log of the Dirichlet distribution normalization term. + + Parameters + ---------- + dirichlet_concentration : array-like of shape (n_samples,) + The parameters values of the Dirichlet distribution. + + Returns + ------- + log_dirichlet_norm : float + The log normalization of the Dirichlet distribution. + """ + return gammaln(np.sum(dirichlet_concentration)) - np.sum( + gammaln(dirichlet_concentration) + ) + + +def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features): + """Compute the log of the Wishart distribution normalization term. + + Parameters + ---------- + degrees_of_freedom : array-like of shape (n_components,) + The number of degrees of freedom on the covariance Wishart + distributions. + + log_det_precision_chol : array-like of shape (n_components,) + The determinant of the precision matrix for each component. + + n_features : int + The number of features. + + Return + ------ + log_wishart_norm : array-like of shape (n_components,) + The log normalization of the Wishart distribution. + """ + # To simplify the computation we have removed the np.log(np.pi) term + return -( + degrees_of_freedom * log_det_precisions_chol + + degrees_of_freedom * n_features * 0.5 * math.log(2.0) + + np.sum( + gammaln(0.5 * (degrees_of_freedom - np.arange(n_features)[:, np.newaxis])), + 0, + ) + ) + + +class BayesianGaussianMixture(BaseMixture): + """Variational Bayesian estimation of a Gaussian mixture. + + This class allows to infer an approximate posterior distribution over the + parameters of a Gaussian mixture distribution. The effective number of + components can be inferred from the data. + + This class implements two types of prior for the weights distribution: a + finite mixture model with Dirichlet distribution and an infinite mixture + model with the Dirichlet Process. In practice Dirichlet Process inference + algorithm is approximated and uses a truncated distribution with a fixed + maximum number of components (called the Stick-breaking representation). + The number of components actually used almost always depends on the data. + + .. versionadded:: 0.18 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=1 + The number of mixture components. Depending on the data and the value + of the `weight_concentration_prior` the model can decide to not use + all the components by setting some component `weights_` to values very + close to zero. The number of effective components is therefore smaller + than n_components. + + covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' + String describing the type of covariance parameters to use. + Must be one of:: + + 'full' (each component has its own general covariance matrix), + 'tied' (all components share the same general covariance matrix), + 'diag' (each component has its own diagonal covariance matrix), + 'spherical' (each component has its own single variance). + + tol : float, default=1e-3 + The convergence threshold. EM iterations will stop when the + lower bound average gain on the likelihood (of the training data with + respect to the model) is below this threshold. + + reg_covar : float, default=1e-6 + Non-negative regularization added to the diagonal of covariance. + Allows to assure that the covariance matrices are all positive. + + max_iter : int, default=100 + The number of EM iterations to perform. 
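As a quick numerical cross-check of the _log_dirichlet_norm helper defined earlier in this file, the Dirichlet log-normalizer can be evaluated directly from gammaln; the concentration vector below is an arbitrary illustrative value.

import numpy as np
from scipy.special import gammaln
from sklearn.mixture._bayesian_mixture import _log_dirichlet_norm

alpha = np.array([0.5, 1.0, 2.0])                       # illustrative Dirichlet concentration
expected = gammaln(alpha.sum()) - gammaln(alpha).sum()  # log of the Dirichlet normalization term
assert np.isclose(_log_dirichlet_norm(alpha), expected)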
+ + n_init : int, default=1 + The number of initializations to perform. The result with the highest + lower bound value on the likelihood is kept. + + init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ + default='kmeans' + The method used to initialize the weights, the means and the + covariances. + String must be one of: + + 'kmeans' : responsibilities are initialized using kmeans. + 'k-means++' : use the k-means++ method to initialize. + 'random' : responsibilities are initialized randomly. + 'random_from_data' : initial means are randomly selected data points. + + .. versionchanged:: v1.1 + `init_params` now accepts 'random_from_data' and 'k-means++' as + initialization methods. + + weight_concentration_prior_type : {'dirichlet_process', 'dirichlet_distribution'}, \ + default='dirichlet_process' + String describing the type of the weight concentration prior. + + weight_concentration_prior : float or None, default=None + The dirichlet concentration of each component on the weight + distribution (Dirichlet). This is commonly called gamma in the + literature. The higher concentration puts more mass in + the center and will lead to more components being active, while a lower + concentration parameter will lead to more mass at the edge of the + mixture weights simplex. The value of the parameter must be greater + than 0. If it is None, it's set to ``1. / n_components``. + + mean_precision_prior : float or None, default=None + The precision prior on the mean distribution (Gaussian). + Controls the extent of where means can be placed. Larger + values concentrate the cluster means around `mean_prior`. + The value of the parameter must be greater than 0. + If it is None, it is set to 1. + + mean_prior : array-like, shape (n_features,), default=None + The prior on the mean distribution (Gaussian). + If it is None, it is set to the mean of X. + + degrees_of_freedom_prior : float or None, default=None + The prior of the number of degrees of freedom on the covariance + distributions (Wishart). If it is None, it's set to `n_features`. + + covariance_prior : float or array-like, default=None + The prior on the covariance distribution (Wishart). + If it is None, the emiprical covariance prior is initialized using the + covariance of X. The shape depends on `covariance_type`:: + + (n_features, n_features) if 'full', + (n_features, n_features) if 'tied', + (n_features) if 'diag', + float if 'spherical' + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to the method chosen to initialize the + parameters (see `init_params`). + In addition, it controls the generation of random samples from the + fitted distribution (see the method `sample`). + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + If 'warm_start' is True, the solution of the last fitting is used as + initialization for the next call of fit(). This can speed up + convergence when fit is called several times on similar problems. + See :term:`the Glossary `. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints the current + initialization and each iteration step. If greater than 1 then + it prints also the log probability and the time needed + for each step. + + verbose_interval : int, default=10 + Number of iteration done before the next print. + + Attributes + ---------- + weights_ : array-like of shape (n_components,) + The weights of each mixture components. 
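A sketch of how weight_concentration_prior interacts with the effective number of components when the Dirichlet-process prior is used; the data, the prior value and the 1e-2 threshold are illustrative only.

import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, size=(100, 2)), rng.normal(6, 1, size=(100, 2))])

# With a small concentration, superfluous components are driven towards
# (near-)zero weight, so fewer than n_components are effectively used.
bgm = BayesianGaussianMixture(
    n_components=10,
    weight_concentration_prior_type="dirichlet_process",
    weight_concentration_prior=1e-2,
    random_state=0,
).fit(X)
n_effective = np.sum(bgm.weights_ > 1e-2)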
+ + means_ : array-like of shape (n_components, n_features) + The mean of each mixture component. + + covariances_ : array-like + The covariance of each mixture component. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_ : array-like + The precision matrices for each component in the mixture. A precision + matrix is the inverse of a covariance matrix. A covariance matrix is + symmetric positive definite so the mixture of Gaussian can be + equivalently parameterized by the precision matrices. Storing the + precision matrices instead of the covariance matrices makes it more + efficient to compute the log-likelihood of new samples at test time. + The shape depends on ``covariance_type``:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_cholesky_ : array-like + The cholesky decomposition of the precision matrices of each mixture + component. A precision matrix is the inverse of a covariance matrix. + A covariance matrix is symmetric positive definite so the mixture of + Gaussian can be equivalently parameterized by the precision matrices. + Storing the precision matrices instead of the covariance matrices makes + it more efficient to compute the log-likelihood of new samples at test + time. The shape depends on ``covariance_type``:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + converged_ : bool + True when convergence was reached in fit(), False otherwise. + + n_iter_ : int + Number of step used by the best fit of inference to reach the + convergence. + + lower_bound_ : float + Lower bound value on the model evidence (of the training data) of the + best fit of inference. + + weight_concentration_prior_ : tuple or float + The dirichlet concentration of each component on the weight + distribution (Dirichlet). The type depends on + ``weight_concentration_prior_type``:: + + (float, float) if 'dirichlet_process' (Beta parameters), + float if 'dirichlet_distribution' (Dirichlet parameters). + + The higher concentration puts more mass in + the center and will lead to more components being active, while a lower + concentration parameter will lead to more mass at the edge of the + simplex. + + weight_concentration_ : array-like of shape (n_components,) + The dirichlet concentration of each component on the weight + distribution (Dirichlet). + + mean_precision_prior_ : float + The precision prior on the mean distribution (Gaussian). + Controls the extent of where means can be placed. + Larger values concentrate the cluster means around `mean_prior`. + If mean_precision_prior is set to None, `mean_precision_prior_` is set + to 1. + + mean_precision_ : array-like of shape (n_components,) + The precision of each components on the mean distribution (Gaussian). + + mean_prior_ : array-like of shape (n_features,) + The prior on the mean distribution (Gaussian). + + degrees_of_freedom_prior_ : float + The prior of the number of degrees of freedom on the covariance + distributions (Wishart). + + degrees_of_freedom_ : array-like of shape (n_components,) + The number of degrees of freedom of each components in the model. 
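The precisions_ attribute described above is the matrix inverse of covariances_ (it is rebuilt from precisions_cholesky_); a quick check on a fitted model with arbitrary toy data, true up to numerical precision.

import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(1)
X = rng.randn(200, 2)

bgm = BayesianGaussianMixture(n_components=2, covariance_type="full", random_state=1).fit(X)
for cov, prec in zip(bgm.covariances_, bgm.precisions_):
    # precisions_ should match the inverse covariance up to round-off
    assert np.allclose(prec, np.linalg.inv(cov))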
+ + covariance_prior_ : float or array-like + The prior on the covariance distribution (Wishart). + The shape depends on `covariance_type`:: + + (n_features, n_features) if 'full', + (n_features, n_features) if 'tied', + (n_features) if 'diag', + float if 'spherical' + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianMixture : Finite Gaussian mixture fit with EM. + + References + ---------- + + .. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine + learning". Vol. 4 No. 4. New York: Springer. + `_ + + .. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for + Graphical Models". In Advances in Neural Information Processing + Systems 12. + `_ + + .. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational + inference for Dirichlet process mixtures". Bayesian analysis 1.1 + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.mixture import BayesianGaussianMixture + >>> X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [12, 4], [10, 7]]) + >>> bgm = BayesianGaussianMixture(n_components=2, random_state=42).fit(X) + >>> bgm.means_ + array([[2.49... , 2.29...], + [8.45..., 4.52... ]]) + >>> bgm.predict([[0, 0], [9, 3]]) + array([0, 1]) + """ + + _parameter_constraints: dict = { + **BaseMixture._parameter_constraints, + "covariance_type": [StrOptions({"spherical", "tied", "diag", "full"})], + "weight_concentration_prior_type": [ + StrOptions({"dirichlet_process", "dirichlet_distribution"}) + ], + "weight_concentration_prior": [ + None, + Interval(Real, 0.0, None, closed="neither"), + ], + "mean_precision_prior": [None, Interval(Real, 0.0, None, closed="neither")], + "mean_prior": [None, "array-like"], + "degrees_of_freedom_prior": [None, Interval(Real, 0.0, None, closed="neither")], + "covariance_prior": [ + None, + "array-like", + Interval(Real, 0.0, None, closed="neither"), + ], + } + + def __init__( + self, + *, + n_components=1, + covariance_type="full", + tol=1e-3, + reg_covar=1e-6, + max_iter=100, + n_init=1, + init_params="kmeans", + weight_concentration_prior_type="dirichlet_process", + weight_concentration_prior=None, + mean_precision_prior=None, + mean_prior=None, + degrees_of_freedom_prior=None, + covariance_prior=None, + random_state=None, + warm_start=False, + verbose=0, + verbose_interval=10, + ): + super().__init__( + n_components=n_components, + tol=tol, + reg_covar=reg_covar, + max_iter=max_iter, + n_init=n_init, + init_params=init_params, + random_state=random_state, + warm_start=warm_start, + verbose=verbose, + verbose_interval=verbose_interval, + ) + + self.covariance_type = covariance_type + self.weight_concentration_prior_type = weight_concentration_prior_type + self.weight_concentration_prior = weight_concentration_prior + self.mean_precision_prior = mean_precision_prior + self.mean_prior = mean_prior + self.degrees_of_freedom_prior = degrees_of_freedom_prior + self.covariance_prior = covariance_prior + + def _check_parameters(self, X): + """Check that the parameters are well defined. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + self._check_weights_parameters() + self._check_means_parameters(X) + self._check_precision_parameters(X) + self._checkcovariance_prior_parameter(X) + + def _check_weights_parameters(self): + """Check the parameter of the Dirichlet distribution.""" + if self.weight_concentration_prior is None: + self.weight_concentration_prior_ = 1.0 / self.n_components + else: + self.weight_concentration_prior_ = self.weight_concentration_prior + + def _check_means_parameters(self, X): + """Check the parameters of the Gaussian distribution. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.mean_precision_prior is None: + self.mean_precision_prior_ = 1.0 + else: + self.mean_precision_prior_ = self.mean_precision_prior + + if self.mean_prior is None: + self.mean_prior_ = X.mean(axis=0) + else: + self.mean_prior_ = check_array( + self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape(self.mean_prior_, (n_features,), "means") + + def _check_precision_parameters(self, X): + """Check the prior parameters of the precision distribution. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.degrees_of_freedom_prior is None: + self.degrees_of_freedom_prior_ = n_features + elif self.degrees_of_freedom_prior > n_features - 1.0: + self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior + else: + raise ValueError( + "The parameter 'degrees_of_freedom_prior' " + "should be greater than %d, but got %.3f." + % (n_features - 1, self.degrees_of_freedom_prior) + ) + + def _checkcovariance_prior_parameter(self, X): + """Check the `covariance_prior_`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.covariance_prior is None: + self.covariance_prior_ = { + "full": np.atleast_2d(np.cov(X.T)), + "tied": np.atleast_2d(np.cov(X.T)), + "diag": np.var(X, axis=0, ddof=1), + "spherical": np.var(X, axis=0, ddof=1).mean(), + }[self.covariance_type] + + elif self.covariance_type in ["full", "tied"]: + self.covariance_prior_ = check_array( + self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape( + self.covariance_prior_, + (n_features, n_features), + "%s covariance_prior" % self.covariance_type, + ) + _check_precision_matrix(self.covariance_prior_, self.covariance_type) + elif self.covariance_type == "diag": + self.covariance_prior_ = check_array( + self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape( + self.covariance_prior_, + (n_features,), + "%s covariance_prior" % self.covariance_type, + ) + _check_precision_positivity(self.covariance_prior_, self.covariance_type) + # spherical case + else: + self.covariance_prior_ = self.covariance_prior + + def _initialize(self, X, resp): + """Initialization of the mixture parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + nk, xk, sk = _estimate_gaussian_parameters( + X, resp, self.reg_covar, self.covariance_type + ) + + self._estimate_weights(nk) + self._estimate_means(nk, xk) + self._estimate_precisions(nk, xk, sk) + + def _estimate_weights(self, nk): + """Estimate the parameters of the Dirichlet distribution. 
+ + Parameters + ---------- + nk : array-like of shape (n_components,) + """ + if self.weight_concentration_prior_type == "dirichlet_process": + # For dirichlet process weight_concentration will be a tuple + # containing the two parameters of the beta distribution + self.weight_concentration_ = ( + 1.0 + nk, + ( + self.weight_concentration_prior_ + + np.hstack((np.cumsum(nk[::-1])[-2::-1], 0)) + ), + ) + else: + # case Variational Gaussian mixture with dirichlet distribution + self.weight_concentration_ = self.weight_concentration_prior_ + nk + + def _estimate_means(self, nk, xk): + """Estimate the parameters of the Gaussian distribution. + + Parameters + ---------- + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + """ + self.mean_precision_ = self.mean_precision_prior_ + nk + self.means_ = ( + self.mean_precision_prior_ * self.mean_prior_ + nk[:, np.newaxis] * xk + ) / self.mean_precision_[:, np.newaxis] + + def _estimate_precisions(self, nk, xk, sk): + """Estimate the precisions parameters of the precision distribution. + + Parameters + ---------- + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like + The shape depends of `covariance_type`: + 'full' : (n_components, n_features, n_features) + 'tied' : (n_features, n_features) + 'diag' : (n_components, n_features) + 'spherical' : (n_components,) + """ + { + "full": self._estimate_wishart_full, + "tied": self._estimate_wishart_tied, + "diag": self._estimate_wishart_diag, + "spherical": self._estimate_wishart_spherical, + }[self.covariance_type](nk, xk, sk) + + self.precisions_cholesky_ = _compute_precision_cholesky( + self.covariances_, self.covariance_type + ) + + def _estimate_wishart_full(self, nk, xk, sk): + """Estimate the full Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components, n_features, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is + # the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + self.covariances_ = np.empty((self.n_components, n_features, n_features)) + + for k in range(self.n_components): + diff = xk[k] - self.mean_prior_ + self.covariances_[k] = ( + self.covariance_prior_ + + nk[k] * sk[k] + + nk[k] + * self.mean_precision_prior_ + / self.mean_precision_[k] + * np.outer(diff, diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis] + + def _estimate_wishart_tied(self, nk, xk, sk): + """Estimate the tied Wishart distribution parameters. 
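The reverse cumulative sum used in _estimate_weights above computes, for each component k, the responsibility mass assigned to components k+1, ..., K (the "remaining stick"); a small numeric sketch with an arbitrary nk makes that explicit, assuming the default weight_concentration_prior_ of 1 / n_components.

import numpy as np

nk = np.array([4.0, 3.0, 2.0, 1.0])                      # illustrative per-component counts
tail_mass = np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))  # == [6., 3., 1., 0.]
# Stick-breaking Beta parameters of the truncated Dirichlet process:
beta_a = 1.0 + nk
beta_b = 1.0 / len(nk) + tail_mass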
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_features, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = ( + self.degrees_of_freedom_prior_ + nk.sum() / self.n_components + ) + + diff = xk - self.mean_prior_ + self.covariances_ = ( + self.covariance_prior_ + + sk * nk.sum() / self.n_components + + self.mean_precision_prior_ + / self.n_components + * np.dot((nk / self.mean_precision_) * diff.T, diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_ + + def _estimate_wishart_diag(self, nk, xk, sk): + """Estimate the diag Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + diff = xk - self.mean_prior_ + self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * ( + sk + + (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] + * np.square(diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis] + + def _estimate_wishart_spherical(self, nk, xk, sk): + """Estimate the spherical Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components,) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + diff = xk - self.mean_prior_ + self.covariances_ = self.covariance_prior_ + nk * ( + sk + + self.mean_precision_prior_ + / self.mean_precision_ + * np.mean(np.square(diff), 1) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_ + + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. 
+ """ + n_samples, _ = X.shape + + nk, xk, sk = _estimate_gaussian_parameters( + X, np.exp(log_resp), self.reg_covar, self.covariance_type + ) + self._estimate_weights(nk) + self._estimate_means(nk, xk) + self._estimate_precisions(nk, xk, sk) + + def _estimate_log_weights(self): + if self.weight_concentration_prior_type == "dirichlet_process": + digamma_sum = digamma( + self.weight_concentration_[0] + self.weight_concentration_[1] + ) + digamma_a = digamma(self.weight_concentration_[0]) + digamma_b = digamma(self.weight_concentration_[1]) + return ( + digamma_a + - digamma_sum + + np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])) + ) + else: + # case Variational Gaussian mixture with dirichlet distribution + return digamma(self.weight_concentration_) - digamma( + np.sum(self.weight_concentration_) + ) + + def _estimate_log_prob(self, X): + _, n_features = X.shape + # We remove `n_features * np.log(self.degrees_of_freedom_)` because + # the precision matrix is normalized + log_gauss = _estimate_log_gaussian_prob( + X, self.means_, self.precisions_cholesky_, self.covariance_type + ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) + + log_lambda = n_features * np.log(2.0) + np.sum( + digamma( + 0.5 + * (self.degrees_of_freedom_ - np.arange(0, n_features)[:, np.newaxis]) + ), + 0, + ) + + return log_gauss + 0.5 * (log_lambda - n_features / self.mean_precision_) + + def _compute_lower_bound(self, log_resp, log_prob_norm): + """Estimate the lower bound of the model. + + The lower bound on the likelihood (of the training data with respect to + the model) is used to detect the convergence and has to increase at + each iteration. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array, shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + + log_prob_norm : float + Logarithm of the probability of each sample in X. + + Returns + ------- + lower_bound : float + """ + # Contrary to the original formula, we have done some simplification + # and removed all the constant terms. + (n_features,) = self.mean_prior_.shape + + # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)` + # because the precision matrix is normalized. 
+ log_det_precisions_chol = _compute_log_det_cholesky( + self.precisions_cholesky_, self.covariance_type, n_features + ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) + + if self.covariance_type == "tied": + log_wishart = self.n_components * np.float64( + _log_wishart_norm( + self.degrees_of_freedom_, log_det_precisions_chol, n_features + ) + ) + else: + log_wishart = np.sum( + _log_wishart_norm( + self.degrees_of_freedom_, log_det_precisions_chol, n_features + ) + ) + + if self.weight_concentration_prior_type == "dirichlet_process": + log_norm_weight = -np.sum( + betaln(self.weight_concentration_[0], self.weight_concentration_[1]) + ) + else: + log_norm_weight = _log_dirichlet_norm(self.weight_concentration_) + + return ( + -np.sum(np.exp(log_resp) * log_resp) + - log_wishart + - log_norm_weight + - 0.5 * n_features * np.sum(np.log(self.mean_precision_)) + ) + + def _get_parameters(self): + return ( + self.weight_concentration_, + self.mean_precision_, + self.means_, + self.degrees_of_freedom_, + self.covariances_, + self.precisions_cholesky_, + ) + + def _set_parameters(self, params): + ( + self.weight_concentration_, + self.mean_precision_, + self.means_, + self.degrees_of_freedom_, + self.covariances_, + self.precisions_cholesky_, + ) = params + + # Weights computation + if self.weight_concentration_prior_type == "dirichlet_process": + weight_dirichlet_sum = ( + self.weight_concentration_[0] + self.weight_concentration_[1] + ) + tmp = self.weight_concentration_[1] / weight_dirichlet_sum + self.weights_ = ( + self.weight_concentration_[0] + / weight_dirichlet_sum + * np.hstack((1, np.cumprod(tmp[:-1]))) + ) + self.weights_ /= np.sum(self.weights_) + else: + self.weights_ = self.weight_concentration_ / np.sum( + self.weight_concentration_ + ) + + # Precisions matrices computation + if self.covariance_type == "full": + self.precisions_ = np.array( + [ + np.dot(prec_chol, prec_chol.T) + for prec_chol in self.precisions_cholesky_ + ] + ) + + elif self.covariance_type == "tied": + self.precisions_ = np.dot( + self.precisions_cholesky_, self.precisions_cholesky_.T + ) + else: + self.precisions_ = self.precisions_cholesky_**2 diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py b/venv/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..09e3674a6779fce1a6270c44af09bc014fcc29b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py @@ -0,0 +1,912 @@ +"""Gaussian Mixture Model.""" + +# Author: Wei Xue +# Modified by Thierry Guillemot +# License: BSD 3 clause + +import numpy as np +from scipy import linalg + +from ..utils import check_array +from ..utils._param_validation import StrOptions +from ..utils.extmath import row_norms +from ._base import BaseMixture, _check_shape + +############################################################################### +# Gaussian mixture shape checkers used by the GaussianMixture class + + +def _check_weights(weights, n_components): + """Check the user provided 'weights'. + + Parameters + ---------- + weights : array-like of shape (n_components,) + The proportions of components of each mixture. + + n_components : int + Number of components. 
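A sketch of the stick-breaking conversion performed in _set_parameters above: the expected length of each stick piece, a / (a + b), is scaled by the product of the remaining stick fractions and then renormalized; the Beta parameters below are arbitrary illustrative values.

import numpy as np

a = np.array([5.0, 3.0, 1.0])   # Beta parameters per component (illustrative)
b = np.array([4.0, 1.0, 0.5])

stick = a / (a + b)                                      # expected stick piece
remaining = np.hstack((1, np.cumprod(b / (a + b))[:-1]))
weights = stick * remaining
weights /= weights.sum()                                 # renormalize, as in _set_parameters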
+ + Returns + ------- + weights : array, shape (n_components,) + """ + weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False) + _check_shape(weights, (n_components,), "weights") + + # check range + if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)): + raise ValueError( + "The parameter 'weights' should be in the range " + "[0, 1], but got max value %.5f, min value %.5f" + % (np.min(weights), np.max(weights)) + ) + + # check normalization + if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0): + raise ValueError( + "The parameter 'weights' should be normalized, but got sum(weights) = %.5f" + % np.sum(weights) + ) + return weights + + +def _check_means(means, n_components, n_features): + """Validate the provided 'means'. + + Parameters + ---------- + means : array-like of shape (n_components, n_features) + The centers of the current components. + + n_components : int + Number of components. + + n_features : int + Number of features. + + Returns + ------- + means : array, (n_components, n_features) + """ + means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False) + _check_shape(means, (n_components, n_features), "means") + return means + + +def _check_precision_positivity(precision, covariance_type): + """Check a precision vector is positive-definite.""" + if np.any(np.less_equal(precision, 0.0)): + raise ValueError("'%s precision' should be positive" % covariance_type) + + +def _check_precision_matrix(precision, covariance_type): + """Check a precision matrix is symmetric and positive-definite.""" + if not ( + np.allclose(precision, precision.T) and np.all(linalg.eigvalsh(precision) > 0.0) + ): + raise ValueError( + "'%s precision' should be symmetric, positive-definite" % covariance_type + ) + + +def _check_precisions_full(precisions, covariance_type): + """Check the precision matrices are symmetric and positive-definite.""" + for prec in precisions: + _check_precision_matrix(prec, covariance_type) + + +def _check_precisions(precisions, covariance_type, n_components, n_features): + """Validate user provided precisions. + + Parameters + ---------- + precisions : array-like + 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : str + + n_components : int + Number of components. + + n_features : int + Number of features. + + Returns + ------- + precisions : array + """ + precisions = check_array( + precisions, + dtype=[np.float64, np.float32], + ensure_2d=False, + allow_nd=covariance_type == "full", + ) + + precisions_shape = { + "full": (n_components, n_features, n_features), + "tied": (n_features, n_features), + "diag": (n_components, n_features), + "spherical": (n_components,), + } + _check_shape( + precisions, precisions_shape[covariance_type], "%s precision" % covariance_type + ) + + _check_precisions = { + "full": _check_precisions_full, + "tied": _check_precision_matrix, + "diag": _check_precision_positivity, + "spherical": _check_precision_positivity, + } + _check_precisions[covariance_type](precisions, covariance_type) + return precisions + + +############################################################################### +# Gaussian mixture parameters estimators (used by the M-Step) + + +def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar): + """Estimate the full covariance matrices. 
+ + Parameters + ---------- + resp : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariances : array, shape (n_components, n_features, n_features) + The covariance matrix of the current components. + """ + n_components, n_features = means.shape + covariances = np.empty((n_components, n_features, n_features)) + for k in range(n_components): + diff = X - means[k] + covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k] + covariances[k].flat[:: n_features + 1] += reg_covar + return covariances + + +def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar): + """Estimate the tied covariance matrix. + + Parameters + ---------- + resp : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariance : array, shape (n_features, n_features) + The tied covariance matrix of the components. + """ + avg_X2 = np.dot(X.T, X) + avg_means2 = np.dot(nk * means.T, means) + covariance = avg_X2 - avg_means2 + covariance /= nk.sum() + covariance.flat[:: len(covariance) + 1] += reg_covar + return covariance + + +def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar): + """Estimate the diagonal covariance vectors. + + Parameters + ---------- + responsibilities : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariances : array, shape (n_components, n_features) + The covariance vector of the current components. + """ + avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis] + avg_means2 = means**2 + avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis] + return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar + + +def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar): + """Estimate the spherical variance values. + + Parameters + ---------- + responsibilities : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + variances : array, shape (n_components,) + The variance values of each components. + """ + return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1) + + +def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type): + """Estimate the Gaussian distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data array. + + resp : array-like of shape (n_samples, n_components) + The responsibilities for each data sample in X. + + reg_covar : float + The regularization added to the diagonal of the covariance matrices. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + nk : array-like of shape (n_components,) + The numbers of data samples in the current components. + + means : array-like of shape (n_components, n_features) + The centers of the current components. 
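The covariance estimators above and _estimate_gaussian_parameters reduce to standard responsibility-weighted statistics; a compact numpy sketch of the 'full' case, with a random resp used purely for illustration.

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
resp = rng.dirichlet(np.ones(2), size=20)          # illustrative responsibilities
reg_covar = 1e-6

nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = resp.T @ X / nk[:, np.newaxis]
covariances = np.empty((2, 3, 3))
for k in range(2):
    diff = X - means[k]
    covariances[k] = (resp[:, k] * diff.T) @ diff / nk[k]
    covariances[k].flat[:: 3 + 1] += reg_covar     # regularize the diagonal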
+ + covariances : array-like + The covariance matrix of the current components. + The shape depends of the covariance_type. + """ + nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps + means = np.dot(resp.T, X) / nk[:, np.newaxis] + covariances = { + "full": _estimate_gaussian_covariances_full, + "tied": _estimate_gaussian_covariances_tied, + "diag": _estimate_gaussian_covariances_diag, + "spherical": _estimate_gaussian_covariances_spherical, + }[covariance_type](resp, X, nk, means, reg_covar) + return nk, means, covariances + + +def _compute_precision_cholesky(covariances, covariance_type): + """Compute the Cholesky decomposition of the precisions. + + Parameters + ---------- + covariances : array-like + The covariance matrix of the current components. + The shape depends of the covariance_type. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + precisions_cholesky : array-like + The cholesky decomposition of sample precisions of the current + components. The shape depends of the covariance_type. + """ + estimate_precision_error_message = ( + "Fitting the mixture model failed because some components have " + "ill-defined empirical covariance (for instance caused by singleton " + "or collapsed samples). Try to decrease the number of components, " + "or increase reg_covar." + ) + + if covariance_type == "full": + n_components, n_features, _ = covariances.shape + precisions_chol = np.empty((n_components, n_features, n_features)) + for k, covariance in enumerate(covariances): + try: + cov_chol = linalg.cholesky(covariance, lower=True) + except linalg.LinAlgError: + raise ValueError(estimate_precision_error_message) + precisions_chol[k] = linalg.solve_triangular( + cov_chol, np.eye(n_features), lower=True + ).T + elif covariance_type == "tied": + _, n_features = covariances.shape + try: + cov_chol = linalg.cholesky(covariances, lower=True) + except linalg.LinAlgError: + raise ValueError(estimate_precision_error_message) + precisions_chol = linalg.solve_triangular( + cov_chol, np.eye(n_features), lower=True + ).T + else: + if np.any(np.less_equal(covariances, 0.0)): + raise ValueError(estimate_precision_error_message) + precisions_chol = 1.0 / np.sqrt(covariances) + return precisions_chol + + +def _flipudlr(array): + """Reverse the rows and columns of an array.""" + return np.flipud(np.fliplr(array)) + + +def _compute_precision_cholesky_from_precisions(precisions, covariance_type): + r"""Compute the Cholesky decomposition of precisions using precisions themselves. + + As implemented in :func:`_compute_precision_cholesky`, the `precisions_cholesky_` is + an upper-triangular matrix for each Gaussian component, which can be expressed as + the $UU^T$ factorization of the precision matrix for each Gaussian component, where + $U$ is an upper-triangular matrix. + + In order to use the Cholesky decomposition to get $UU^T$, the precision matrix + $\Lambda$ needs to be permutated such that its rows and columns are reversed, which + can be done by applying a similarity transformation with an exchange matrix $J$, + where the 1 elements reside on the anti-diagonal and all other elements are 0. In + particular, the Cholesky decomposition of the transformed precision matrix is + $J\Lambda J=LL^T$, where $L$ is a lower-triangular matrix. Because $\Lambda=UU^T$ + and $J=J^{-1}=J^T$, the `precisions_cholesky_` for each Gaussian component can be + expressed as $JLJ$. + + Refer to #26415 for details. 
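A numerical sketch of the exchange-matrix construction described above: flipping the rows and columns of a precision matrix, taking its lower Cholesky factor and flipping back yields an upper-triangular U with U @ U.T equal to the precision; the 2x2 matrix is an arbitrary symmetric positive-definite example.

import numpy as np
from scipy import linalg

precision = np.array([[4.0, 1.0],
                      [1.0, 3.0]])

def flip(a):
    # apply the exchange matrix J on both sides (reverse rows and columns)
    return np.flipud(np.fliplr(a))

U = flip(linalg.cholesky(flip(precision), lower=True))

assert np.allclose(np.triu(U), U)        # U is upper triangular
assert np.allclose(U @ U.T, precision)   # U U^T recovers the precision matrix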
+ + Parameters + ---------- + precisions : array-like + The precision matrix of the current components. + The shape depends on the covariance_type. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + precisions_cholesky : array-like + The cholesky decomposition of sample precisions of the current + components. The shape depends on the covariance_type. + """ + if covariance_type == "full": + precisions_cholesky = np.array( + [ + _flipudlr(linalg.cholesky(_flipudlr(precision), lower=True)) + for precision in precisions + ] + ) + elif covariance_type == "tied": + precisions_cholesky = _flipudlr( + linalg.cholesky(_flipudlr(precisions), lower=True) + ) + else: + precisions_cholesky = np.sqrt(precisions) + return precisions_cholesky + + +############################################################################### +# Gaussian mixture probability estimators +def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features): + """Compute the log-det of the cholesky decomposition of matrices. + + Parameters + ---------- + matrix_chol : array-like + Cholesky decompositions of the matrices. + 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + + n_features : int + Number of features. + + Returns + ------- + log_det_precision_chol : array-like of shape (n_components,) + The determinant of the precision matrix for each component. + """ + if covariance_type == "full": + n_components, _, _ = matrix_chol.shape + log_det_chol = np.sum( + np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1 + ) + + elif covariance_type == "tied": + log_det_chol = np.sum(np.log(np.diag(matrix_chol))) + + elif covariance_type == "diag": + log_det_chol = np.sum(np.log(matrix_chol), axis=1) + + else: + log_det_chol = n_features * (np.log(matrix_chol)) + + return log_det_chol + + +def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type): + """Estimate the log Gaussian probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + means : array-like of shape (n_components, n_features) + + precisions_chol : array-like + Cholesky decompositions of the precision matrices. + 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + + Returns + ------- + log_prob : array, shape (n_samples, n_components) + """ + n_samples, n_features = X.shape + n_components, _ = means.shape + # The determinant of the precision matrix from the Cholesky decomposition + # corresponds to the negative half of the determinant of the full precision + # matrix. 
+ # In short: det(precision_chol) = - det(precision) / 2 + log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features) + + if covariance_type == "full": + log_prob = np.empty((n_samples, n_components)) + for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)): + y = np.dot(X, prec_chol) - np.dot(mu, prec_chol) + log_prob[:, k] = np.sum(np.square(y), axis=1) + + elif covariance_type == "tied": + log_prob = np.empty((n_samples, n_components)) + for k, mu in enumerate(means): + y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol) + log_prob[:, k] = np.sum(np.square(y), axis=1) + + elif covariance_type == "diag": + precisions = precisions_chol**2 + log_prob = ( + np.sum((means**2 * precisions), 1) + - 2.0 * np.dot(X, (means * precisions).T) + + np.dot(X**2, precisions.T) + ) + + elif covariance_type == "spherical": + precisions = precisions_chol**2 + log_prob = ( + np.sum(means**2, 1) * precisions + - 2 * np.dot(X, means.T * precisions) + + np.outer(row_norms(X, squared=True), precisions) + ) + # Since we are using the precision of the Cholesky decomposition, + # `- 0.5 * log_det_precision` becomes `+ log_det_precision_chol` + return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det + + +class GaussianMixture(BaseMixture): + """Gaussian Mixture. + + Representation of a Gaussian mixture model probability distribution. + This class allows to estimate the parameters of a Gaussian mixture + distribution. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + n_components : int, default=1 + The number of mixture components. + + covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' + String describing the type of covariance parameters to use. + Must be one of: + + - 'full': each component has its own general covariance matrix. + - 'tied': all components share the same general covariance matrix. + - 'diag': each component has its own diagonal covariance matrix. + - 'spherical': each component has its own single variance. + + tol : float, default=1e-3 + The convergence threshold. EM iterations will stop when the + lower bound average gain is below this threshold. + + reg_covar : float, default=1e-6 + Non-negative regularization added to the diagonal of covariance. + Allows to assure that the covariance matrices are all positive. + + max_iter : int, default=100 + The number of EM iterations to perform. + + n_init : int, default=1 + The number of initializations to perform. The best results are kept. + + init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ + default='kmeans' + The method used to initialize the weights, the means and the + precisions. + String must be one of: + + - 'kmeans' : responsibilities are initialized using kmeans. + - 'k-means++' : use the k-means++ method to initialize. + - 'random' : responsibilities are initialized randomly. + - 'random_from_data' : initial means are randomly selected data points. + + .. versionchanged:: v1.1 + `init_params` now accepts 'random_from_data' and 'k-means++' as + initialization methods. + + weights_init : array-like of shape (n_components, ), default=None + The user-provided initial weights. + If it is None, weights are initialized using the `init_params` method. + + means_init : array-like of shape (n_components, n_features), default=None + The user-provided initial means, + If it is None, means are initialized using the `init_params` method. 
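The per-component log-densities returned by _estimate_log_gaussian_prob above agree with a direct evaluation through scipy.stats; a small check for the 'full' case with arbitrary toy parameters, assuming it is acceptable to import the private helpers.

import numpy as np
from scipy.stats import multivariate_normal
from sklearn.mixture._gaussian_mixture import (
    _compute_precision_cholesky,
    _estimate_log_gaussian_prob,
)

X = np.array([[0.0, 0.0], [1.0, 2.0]])
means = np.array([[0.0, 0.0], [3.0, 3.0]])
covariances = np.array([np.eye(2), 2.0 * np.eye(2)])   # 'full' covariances

prec_chol = _compute_precision_cholesky(covariances, "full")
log_prob = _estimate_log_gaussian_prob(X, means, prec_chol, "full")

expected = np.column_stack(
    [multivariate_normal(mean=m, cov=c).logpdf(X) for m, c in zip(means, covariances)]
)
assert np.allclose(log_prob, expected)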
+ + precisions_init : array-like, default=None + The user-provided initial precisions (inverse of the covariance + matrices). + If it is None, precisions are initialized using the 'init_params' + method. + The shape depends on 'covariance_type':: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to the method chosen to initialize the + parameters (see `init_params`). + In addition, it controls the generation of random samples from the + fitted distribution (see the method `sample`). + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + If 'warm_start' is True, the solution of the last fitting is used as + initialization for the next call of fit(). This can speed up + convergence when fit is called several times on similar problems. + In that case, 'n_init' is ignored and only a single initialization + occurs upon the first call. + See :term:`the Glossary `. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints the current + initialization and each iteration step. If greater than 1 then + it prints also the log probability and the time needed + for each step. + + verbose_interval : int, default=10 + Number of iteration done before the next print. + + Attributes + ---------- + weights_ : array-like of shape (n_components,) + The weights of each mixture components. + + means_ : array-like of shape (n_components, n_features) + The mean of each mixture component. + + covariances_ : array-like + The covariance of each mixture component. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_ : array-like + The precision matrices for each component in the mixture. A precision + matrix is the inverse of a covariance matrix. A covariance matrix is + symmetric positive definite so the mixture of Gaussian can be + equivalently parameterized by the precision matrices. Storing the + precision matrices instead of the covariance matrices makes it more + efficient to compute the log-likelihood of new samples at test time. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_cholesky_ : array-like + The cholesky decomposition of the precision matrices of each mixture + component. A precision matrix is the inverse of a covariance matrix. + A covariance matrix is symmetric positive definite so the mixture of + Gaussian can be equivalently parameterized by the precision matrices. + Storing the precision matrices instead of the covariance matrices makes + it more efficient to compute the log-likelihood of new samples at test + time. The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + converged_ : bool + True when convergence was reached in fit(), False otherwise. + + n_iter_ : int + Number of step used by the best fit of EM to reach the convergence. 
+ + lower_bound_ : float + Lower bound value on the log-likelihood (of the training data with + respect to the model) of the best fit of EM. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BayesianGaussianMixture : Gaussian mixture model fit with a variational + inference. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.mixture import GaussianMixture + >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]]) + >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X) + >>> gm.means_ + array([[10., 2.], + [ 1., 2.]]) + >>> gm.predict([[0, 0], [12, 3]]) + array([1, 0]) + """ + + _parameter_constraints: dict = { + **BaseMixture._parameter_constraints, + "covariance_type": [StrOptions({"full", "tied", "diag", "spherical"})], + "weights_init": ["array-like", None], + "means_init": ["array-like", None], + "precisions_init": ["array-like", None], + } + + def __init__( + self, + n_components=1, + *, + covariance_type="full", + tol=1e-3, + reg_covar=1e-6, + max_iter=100, + n_init=1, + init_params="kmeans", + weights_init=None, + means_init=None, + precisions_init=None, + random_state=None, + warm_start=False, + verbose=0, + verbose_interval=10, + ): + super().__init__( + n_components=n_components, + tol=tol, + reg_covar=reg_covar, + max_iter=max_iter, + n_init=n_init, + init_params=init_params, + random_state=random_state, + warm_start=warm_start, + verbose=verbose, + verbose_interval=verbose_interval, + ) + + self.covariance_type = covariance_type + self.weights_init = weights_init + self.means_init = means_init + self.precisions_init = precisions_init + + def _check_parameters(self, X): + """Check the Gaussian mixture parameters are well defined.""" + _, n_features = X.shape + + if self.weights_init is not None: + self.weights_init = _check_weights(self.weights_init, self.n_components) + + if self.means_init is not None: + self.means_init = _check_means( + self.means_init, self.n_components, n_features + ) + + if self.precisions_init is not None: + self.precisions_init = _check_precisions( + self.precisions_init, + self.covariance_type, + self.n_components, + n_features, + ) + + def _initialize_parameters(self, X, random_state): + # If all the initial parameters are all provided, then there is no need to run + # the initialization. + compute_resp = ( + self.weights_init is None + or self.means_init is None + or self.precisions_init is None + ) + if compute_resp: + super()._initialize_parameters(X, random_state) + else: + self._initialize(X, None) + + def _initialize(self, X, resp): + """Initialization of the Gaussian mixture parameters. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + n_samples, _ = X.shape + weights, means, covariances = None, None, None + if resp is not None: + weights, means, covariances = _estimate_gaussian_parameters( + X, resp, self.reg_covar, self.covariance_type + ) + if self.weights_init is None: + weights /= n_samples + + self.weights_ = weights if self.weights_init is None else self.weights_init + self.means_ = means if self.means_init is None else self.means_init + + if self.precisions_init is None: + self.covariances_ = covariances + self.precisions_cholesky_ = _compute_precision_cholesky( + covariances, self.covariance_type + ) + else: + self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions( + self.precisions_init, self.covariance_type + ) + + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters( + X, np.exp(log_resp), self.reg_covar, self.covariance_type + ) + self.weights_ /= self.weights_.sum() + self.precisions_cholesky_ = _compute_precision_cholesky( + self.covariances_, self.covariance_type + ) + + def _estimate_log_prob(self, X): + return _estimate_log_gaussian_prob( + X, self.means_, self.precisions_cholesky_, self.covariance_type + ) + + def _estimate_log_weights(self): + return np.log(self.weights_) + + def _compute_lower_bound(self, _, log_prob_norm): + return log_prob_norm + + def _get_parameters(self): + return ( + self.weights_, + self.means_, + self.covariances_, + self.precisions_cholesky_, + ) + + def _set_parameters(self, params): + ( + self.weights_, + self.means_, + self.covariances_, + self.precisions_cholesky_, + ) = params + + # Attributes computation + _, n_features = self.means_.shape + + if self.covariance_type == "full": + self.precisions_ = np.empty(self.precisions_cholesky_.shape) + for k, prec_chol in enumerate(self.precisions_cholesky_): + self.precisions_[k] = np.dot(prec_chol, prec_chol.T) + + elif self.covariance_type == "tied": + self.precisions_ = np.dot( + self.precisions_cholesky_, self.precisions_cholesky_.T + ) + else: + self.precisions_ = self.precisions_cholesky_**2 + + def _n_parameters(self): + """Return the number of free parameters in the model.""" + _, n_features = self.means_.shape + if self.covariance_type == "full": + cov_params = self.n_components * n_features * (n_features + 1) / 2.0 + elif self.covariance_type == "diag": + cov_params = self.n_components * n_features + elif self.covariance_type == "tied": + cov_params = n_features * (n_features + 1) / 2.0 + elif self.covariance_type == "spherical": + cov_params = self.n_components + mean_params = n_features * self.n_components + return int(cov_params + mean_params + self.n_components - 1) + + def bic(self, X): + """Bayesian information criterion for the current model on the input X. + + You can refer to this :ref:`mathematical section ` for more + details regarding the formulation of the BIC used. + + Parameters + ---------- + X : array of shape (n_samples, n_dimensions) + The input samples. + + Returns + ------- + bic : float + The lower the better. 
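The bic method above (and aic just below) implement the usual penalized log-likelihood criteria, BIC = -2 log L + p log(n) and AIC = -2 log L + 2 p with p = _n_parameters(); a small sketch of choosing n_components by BIC, with toy two-cluster data chosen only for illustration.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, size=(100, 2)), rng.normal(5, 1, size=(100, 2))])

bics = {
    k: GaussianMixture(n_components=k, random_state=0).fit(X).bic(X)
    for k in range(1, 5)
}
best_k = min(bics, key=bics.get)   # lowest BIC wins; 2 is expected for this toy data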
+ """ + return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log( + X.shape[0] + ) + + def aic(self, X): + """Akaike information criterion for the current model on the input X. + + You can refer to this :ref:`mathematical section ` for more + details regarding the formulation of the AIC used. + + Parameters + ---------- + X : array of shape (n_samples, n_dimensions) + The input samples. + + Returns + ------- + aic : float + The lower the better. + """ + return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters() diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64193fe8f66e426e155060d50a8be8169f96bcb6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8430658a4ed86478b42f09e6e075b4ca40180f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173ece3b81b802f50991368bf0cadc32b4685a11 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a16a4b3d18e7b711e74903582951b8da2aeccdf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..9c6eb4a86ea0d4e5988706e6e841fe5f5b992871 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py @@ -0,0 +1,466 @@ +# Author: Wei Xue +# Thierry Guillemot +# License: BSD 3 clause +import copy + +import numpy as np +import pytest +from scipy.special import gammaln + +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.metrics.cluster import adjusted_rand_score +from sklearn.mixture import BayesianGaussianMixture +from sklearn.mixture._bayesian_mixture import _log_dirichlet_norm, _log_wishart_norm +from sklearn.mixture.tests.test_gaussian_mixture import RandomData +from 
sklearn.utils._testing import ( + assert_almost_equal, + assert_array_equal, + ignore_warnings, +) + +COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] +PRIOR_TYPE = ["dirichlet_process", "dirichlet_distribution"] + + +def test_log_dirichlet_norm(): + rng = np.random.RandomState(0) + + weight_concentration = rng.rand(2) + expected_norm = gammaln(np.sum(weight_concentration)) - np.sum( + gammaln(weight_concentration) + ) + predected_norm = _log_dirichlet_norm(weight_concentration) + + assert_almost_equal(expected_norm, predected_norm) + + +def test_log_wishart_norm(): + rng = np.random.RandomState(0) + + n_components, n_features = 5, 2 + degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.0 + log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components)) + + expected_norm = np.empty(5) + for k, (degrees_of_freedom_k, log_det_k) in enumerate( + zip(degrees_of_freedom, log_det_precisions_chol) + ): + expected_norm[k] = -( + degrees_of_freedom_k * (log_det_k + 0.5 * n_features * np.log(2.0)) + + np.sum( + gammaln( + 0.5 + * (degrees_of_freedom_k - np.arange(0, n_features)[:, np.newaxis]) + ), + 0, + ) + ).item() + predected_norm = _log_wishart_norm( + degrees_of_freedom, log_det_precisions_chol, n_features + ) + + assert_almost_equal(expected_norm, predected_norm) + + +def test_bayesian_mixture_weights_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_components, n_features = 10, 5, 2 + X = rng.rand(n_samples, n_features) + + # Check correct init for a given value of weight_concentration_prior + weight_concentration_prior = rng.rand() + bgmm = BayesianGaussianMixture( + weight_concentration_prior=weight_concentration_prior, random_state=rng + ).fit(X) + assert_almost_equal(weight_concentration_prior, bgmm.weight_concentration_prior_) + + # Check correct init for the default value of weight_concentration_prior + bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) + assert_almost_equal(1.0 / n_components, bgmm.weight_concentration_prior_) + + +def test_bayesian_mixture_mean_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_components, n_features = 10, 3, 2 + X = rng.rand(n_samples, n_features) + + # Check correct init for a given value of mean_precision_prior + mean_precision_prior = rng.rand() + bgmm = BayesianGaussianMixture( + mean_precision_prior=mean_precision_prior, random_state=rng + ).fit(X) + assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_) + + # Check correct init for the default value of mean_precision_prior + bgmm = BayesianGaussianMixture(random_state=rng).fit(X) + assert_almost_equal(1.0, bgmm.mean_precision_prior_) + + # Check correct init for a given value of mean_prior + mean_prior = rng.rand(n_features) + bgmm = BayesianGaussianMixture( + n_components=n_components, mean_prior=mean_prior, random_state=rng + ).fit(X) + assert_almost_equal(mean_prior, bgmm.mean_prior_) + + # Check correct init for the default value of bemean_priorta + bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) + assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_) + + +def test_bayesian_mixture_precisions_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + X = rng.rand(n_samples, n_features) + + # Check raise message for a bad value of degrees_of_freedom_prior + bad_degrees_of_freedom_prior_ = n_features - 1.0 + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=bad_degrees_of_freedom_prior_, random_state=rng + ) 
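+    # The Wishart prior on the precisions is only defined for degrees of freedom
+    # strictly greater than n_features - 1, so fitting with this value must raise.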
+ msg = ( + "The parameter 'degrees_of_freedom_prior' should be greater than" + f" {n_features -1}, but got {bad_degrees_of_freedom_prior_:.3f}." + ) + with pytest.raises(ValueError, match=msg): + bgmm.fit(X) + + # Check correct init for a given value of degrees_of_freedom_prior + degrees_of_freedom_prior = rng.rand() + n_features - 1.0 + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=degrees_of_freedom_prior, random_state=rng + ).fit(X) + assert_almost_equal(degrees_of_freedom_prior, bgmm.degrees_of_freedom_prior_) + + # Check correct init for the default value of degrees_of_freedom_prior + degrees_of_freedom_prior_default = n_features + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=degrees_of_freedom_prior_default, random_state=rng + ).fit(X) + assert_almost_equal( + degrees_of_freedom_prior_default, bgmm.degrees_of_freedom_prior_ + ) + + # Check correct init for a given value of covariance_prior + covariance_prior = { + "full": np.cov(X.T, bias=1) + 10, + "tied": np.cov(X.T, bias=1) + 5, + "diag": np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3, + "spherical": rng.rand(), + } + + bgmm = BayesianGaussianMixture(random_state=rng) + for cov_type in ["full", "tied", "diag", "spherical"]: + bgmm.covariance_type = cov_type + bgmm.covariance_prior = covariance_prior[cov_type] + bgmm.fit(X) + assert_almost_equal(covariance_prior[cov_type], bgmm.covariance_prior_) + + # Check correct init for the default value of covariance_prior + covariance_prior_default = { + "full": np.atleast_2d(np.cov(X.T)), + "tied": np.atleast_2d(np.cov(X.T)), + "diag": np.var(X, axis=0, ddof=1), + "spherical": np.var(X, axis=0, ddof=1).mean(), + } + + bgmm = BayesianGaussianMixture(random_state=0) + for cov_type in ["full", "tied", "diag", "spherical"]: + bgmm.covariance_type = cov_type + bgmm.fit(X) + assert_almost_equal(covariance_prior_default[cov_type], bgmm.covariance_prior_) + + +def test_bayesian_mixture_check_is_fitted(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + + # Check raise message + bgmm = BayesianGaussianMixture(random_state=rng) + X = rng.rand(n_samples, n_features) + + msg = "This BayesianGaussianMixture instance is not fitted yet." 
+    with pytest.raises(ValueError, match=msg):
+        bgmm.score(X)
+
+
+def test_bayesian_mixture_weights():
+    rng = np.random.RandomState(0)
+    n_samples, n_features = 10, 2
+
+    X = rng.rand(n_samples, n_features)
+
+    # Case Dirichlet distribution for the weight concentration prior type
+    bgmm = BayesianGaussianMixture(
+        weight_concentration_prior_type="dirichlet_distribution",
+        n_components=3,
+        random_state=rng,
+    ).fit(X)
+
+    expected_weights = bgmm.weight_concentration_ / np.sum(bgmm.weight_concentration_)
+    assert_almost_equal(expected_weights, bgmm.weights_)
+    assert_almost_equal(np.sum(bgmm.weights_), 1.0)
+
+    # Case Dirichlet process for the weight concentration prior type
+    dpgmm = BayesianGaussianMixture(
+        weight_concentration_prior_type="dirichlet_process",
+        n_components=3,
+        random_state=rng,
+    ).fit(X)
+    weight_dirichlet_sum = (
+        dpgmm.weight_concentration_[0] + dpgmm.weight_concentration_[1]
+    )
+    tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
+    expected_weights = (
+        dpgmm.weight_concentration_[0]
+        / weight_dirichlet_sum
+        * np.hstack((1, np.cumprod(tmp[:-1])))
+    )
+    expected_weights /= np.sum(expected_weights)
+    assert_almost_equal(expected_weights, dpgmm.weights_)
+    assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
+
+
+@ignore_warnings(category=ConvergenceWarning)
+def test_monotonic_likelihood():
+    # We check that each step of variational inference without regularization
+    # monotonically improves the lower bound on the training set.
+    rng = np.random.RandomState(0)
+    rand_data = RandomData(rng, scale=20)
+    n_components = rand_data.n_components
+
+    for prior_type in PRIOR_TYPE:
+        for covar_type in COVARIANCE_TYPE:
+            X = rand_data.X[covar_type]
+            bgmm = BayesianGaussianMixture(
+                weight_concentration_prior_type=prior_type,
+                n_components=2 * n_components,
+                covariance_type=covar_type,
+                warm_start=True,
+                max_iter=1,
+                random_state=rng,
+                tol=1e-3,
+            )
+            current_lower_bound = -np.inf
+            # Do one training iteration at a time so we can make sure that the
+            # training log likelihood increases after each iteration.
+            for _ in range(600):
+                prev_lower_bound = current_lower_bound
+                current_lower_bound = bgmm.fit(X).lower_bound_
+                assert current_lower_bound >= prev_lower_bound
+
+                if bgmm.converged_:
+                    break
+            assert bgmm.converged_
+
+
+def test_compare_covar_type():
+    # We can compare the 'full' precision with the other cov_type if we apply
+    # 1 iter of the M-step (done during _initialize_parameters).
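+    # After that single M-step the estimates should be related as follows:
+    #   'tied'      ~ mean over components of the 'full' covariances
+    #   'diag'      ~ the diagonal of each 'full' covariance
+    #   'spherical' ~ mean over features of the 'diag' covariances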
+ rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + X = rand_data.X["full"] + n_components = rand_data.n_components + + for prior_type in PRIOR_TYPE: + # Computation of the full_covariance + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="full", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + full_covariances = ( + bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis] + ) + + # Check tied_covariance = mean(full_covariances, 0) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="tied", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_ + assert_almost_equal(tied_covariance, np.mean(full_covariances, 0)) + + # Check diag_covariance = diag(full_covariances) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="diag", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + diag_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis] + assert_almost_equal( + diag_covariances, np.array([np.diag(cov) for cov in full_covariances]) + ) + + # Check spherical_covariance = np.mean(diag_covariances, 0) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="spherical", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_ + assert_almost_equal(spherical_covariances, np.mean(diag_covariances, 1)) + + +@ignore_warnings(category=ConvergenceWarning) +def test_check_covariance_precision(): + # We check that the dot product of the covariance and the precision + # matrices is identity. 
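+    # The identity check below takes a different form per covariance_type:
+    #   'full'  -> covariances_[k] @ precisions_[k] == identity for each component
+    #   'tied'  -> covariances_ @ precisions_ == identity (one shared matrix)
+    #   'diag' / 'spherical' -> covariances_ * precisions_ == 1 elementwise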
+ rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components, n_features = 2 * rand_data.n_components, 2 + + # Computation of the full_covariance + bgmm = BayesianGaussianMixture( + n_components=n_components, max_iter=100, random_state=rng, tol=1e-3, reg_covar=0 + ) + for covar_type in COVARIANCE_TYPE: + bgmm.covariance_type = covar_type + bgmm.fit(rand_data.X[covar_type]) + + if covar_type == "full": + for covar, precision in zip(bgmm.covariances_, bgmm.precisions_): + assert_almost_equal(np.dot(covar, precision), np.eye(n_features)) + elif covar_type == "tied": + assert_almost_equal( + np.dot(bgmm.covariances_, bgmm.precisions_), np.eye(n_features) + ) + + elif covar_type == "diag": + assert_almost_equal( + bgmm.covariances_ * bgmm.precisions_, + np.ones((n_components, n_features)), + ) + + else: + assert_almost_equal( + bgmm.covariances_ * bgmm.precisions_, np.ones(n_components) + ) + + +@ignore_warnings(category=ConvergenceWarning) +def test_invariant_translation(): + # We check here that adding a constant in the data change correctly the + # parameters of the mixture + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=100) + n_components = 2 * rand_data.n_components + + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + bgmm1 = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=n_components, + max_iter=100, + random_state=0, + tol=1e-3, + reg_covar=0, + ).fit(X) + bgmm2 = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=n_components, + max_iter=100, + random_state=0, + tol=1e-3, + reg_covar=0, + ).fit(X + 100) + + assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100) + assert_almost_equal(bgmm1.weights_, bgmm2.weights_) + assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_) + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize( + "seed, max_iter, tol", + [ + (0, 2, 1e-7), # strict non-convergence + (1, 2, 1e-1), # loose non-convergence + (3, 300, 1e-7), # strict convergence + (4, 300, 1e-1), # loose convergence + ], +) +def test_bayesian_mixture_fit_predict(seed, max_iter, tol): + rng = np.random.RandomState(seed) + rand_data = RandomData(rng, n_samples=50, scale=7) + n_components = 2 * rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + bgmm1 = BayesianGaussianMixture( + n_components=n_components, + max_iter=max_iter, + random_state=rng, + tol=tol, + reg_covar=0, + ) + bgmm1.covariance_type = covar_type + bgmm2 = copy.deepcopy(bgmm1) + X = rand_data.X[covar_type] + + Y_pred1 = bgmm1.fit(X).predict(X) + Y_pred2 = bgmm2.fit_predict(X) + assert_array_equal(Y_pred1, Y_pred2) + + +def test_bayesian_mixture_fit_predict_n_init(): + # Check that fit_predict is equivalent to fit.predict, when n_init > 1 + X = np.random.RandomState(0).randn(50, 5) + gm = BayesianGaussianMixture(n_components=5, n_init=10, random_state=0) + y_pred1 = gm.fit_predict(X) + y_pred2 = gm.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +def test_bayesian_mixture_predict_predict_proba(): + # this is the same test as test_gaussian_mixture_predict_predict_proba() + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + bgmm = BayesianGaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weight_concentration_prior_type=prior_type, + 
covariance_type=covar_type, + ) + + # Check a warning message arrive if we don't do fit + msg = ( + "This BayesianGaussianMixture instance is not fitted yet. " + "Call 'fit' with appropriate arguments before using this " + "estimator." + ) + with pytest.raises(NotFittedError, match=msg): + bgmm.predict(X) + + bgmm.fit(X) + Y_pred = bgmm.predict(X) + Y_pred_proba = bgmm.predict_proba(X).argmax(axis=1) + assert_array_equal(Y_pred, Y_pred_proba) + assert adjusted_rand_score(Y, Y_pred) >= 0.95 diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..e24a6af96637458b39e63430beecf53983e0ecf0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py @@ -0,0 +1,1422 @@ +# Author: Wei Xue +# Thierry Guillemot +# License: BSD 3 clause + +import copy +import itertools +import re +import sys +import warnings +from io import StringIO +from unittest.mock import Mock + +import numpy as np +import pytest +from scipy import linalg, stats + +import sklearn +from sklearn.cluster import KMeans +from sklearn.covariance import EmpiricalCovariance +from sklearn.datasets import make_spd_matrix +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.metrics.cluster import adjusted_rand_score +from sklearn.mixture import GaussianMixture +from sklearn.mixture._gaussian_mixture import ( + _compute_log_det_cholesky, + _compute_precision_cholesky, + _estimate_gaussian_covariances_diag, + _estimate_gaussian_covariances_full, + _estimate_gaussian_covariances_spherical, + _estimate_gaussian_covariances_tied, + _estimate_gaussian_parameters, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.extmath import fast_logdet + +COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] + + +def generate_data(n_samples, n_features, weights, means, precisions, covariance_type): + rng = np.random.RandomState(0) + + X = [] + if covariance_type == "spherical": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["spherical"])): + X.append( + rng.multivariate_normal( + m, c * np.eye(n_features), int(np.round(w * n_samples)) + ) + ) + if covariance_type == "diag": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["diag"])): + X.append( + rng.multivariate_normal(m, np.diag(c), int(np.round(w * n_samples))) + ) + if covariance_type == "tied": + for _, (w, m) in enumerate(zip(weights, means)): + X.append( + rng.multivariate_normal( + m, precisions["tied"], int(np.round(w * n_samples)) + ) + ) + if covariance_type == "full": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["full"])): + X.append(rng.multivariate_normal(m, c, int(np.round(w * n_samples)))) + + X = np.vstack(X) + return X + + +class RandomData: + def __init__(self, rng, n_samples=200, n_components=2, n_features=2, scale=50): + self.n_samples = n_samples + self.n_components = n_components + self.n_features = n_features + + self.weights = rng.rand(n_components) + self.weights = self.weights / self.weights.sum() + self.means = rng.rand(n_components, n_features) * scale + self.covariances = { + "spherical": 0.5 + rng.rand(n_components), + "diag": (0.5 + rng.rand(n_components, n_features)) ** 2, + "tied": make_spd_matrix(n_features, random_state=rng), + "full": np.array( + 
[ + make_spd_matrix(n_features, random_state=rng) * 0.5 + for _ in range(n_components) + ] + ), + } + self.precisions = { + "spherical": 1.0 / self.covariances["spherical"], + "diag": 1.0 / self.covariances["diag"], + "tied": linalg.inv(self.covariances["tied"]), + "full": np.array( + [linalg.inv(covariance) for covariance in self.covariances["full"]] + ), + } + + self.X = dict( + zip( + COVARIANCE_TYPE, + [ + generate_data( + n_samples, + n_features, + self.weights, + self.means, + self.covariances, + covar_type, + ) + for covar_type in COVARIANCE_TYPE + ], + ) + ) + self.Y = np.hstack( + [ + np.full(int(np.round(w * n_samples)), k, dtype=int) + for k, w in enumerate(self.weights) + ] + ) + + +def test_gaussian_mixture_attributes(): + # test bad parameters + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + + # test good parameters + n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1 + covariance_type, init_params = "full", "random" + gmm = GaussianMixture( + n_components=n_components, + tol=tol, + n_init=n_init, + max_iter=max_iter, + reg_covar=reg_covar, + covariance_type=covariance_type, + init_params=init_params, + ).fit(X) + + assert gmm.n_components == n_components + assert gmm.covariance_type == covariance_type + assert gmm.tol == tol + assert gmm.reg_covar == reg_covar + assert gmm.max_iter == max_iter + assert gmm.n_init == n_init + assert gmm.init_params == init_params + + +def test_check_weights(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + + n_components = rand_data.n_components + X = rand_data.X["full"] + + g = GaussianMixture(n_components=n_components) + + # Check bad shape + weights_bad_shape = rng.rand(n_components, 1) + g.weights_init = weights_bad_shape + msg = re.escape( + "The parameter 'weights' should have the shape of " + f"({n_components},), but got {str(weights_bad_shape.shape)}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check bad range + weights_bad_range = rng.rand(n_components) + 1 + g.weights_init = weights_bad_range + msg = re.escape( + "The parameter 'weights' should be in the range [0, 1], but got" + f" max value {np.min(weights_bad_range):.5f}, " + f"min value {np.max(weights_bad_range):.5f}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check bad normalization + weights_bad_norm = rng.rand(n_components) + weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1) + g.weights_init = weights_bad_norm + msg = re.escape( + "The parameter 'weights' should be normalized, " + f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check good weights matrix + weights = rand_data.weights + g = GaussianMixture(weights_init=weights, n_components=n_components) + g.fit(X) + assert_array_equal(weights, g.weights_init) + + +def test_check_means(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + + n_components, n_features = rand_data.n_components, rand_data.n_features + X = rand_data.X["full"] + + g = GaussianMixture(n_components=n_components) + + # Check means bad shape + means_bad_shape = rng.rand(n_components + 1, n_features) + g.means_init = means_bad_shape + msg = "The parameter 'means' should have the shape of " + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check good means matrix + means = rand_data.means + g.means_init = means + g.fit(X) + assert_array_equal(means, g.means_init) + + +def test_check_precisions(): + rng = np.random.RandomState(0) + rand_data = 
RandomData(rng) + + n_components, n_features = rand_data.n_components, rand_data.n_features + + # Define the bad precisions for each covariance_type + precisions_bad_shape = { + "full": np.ones((n_components + 1, n_features, n_features)), + "tied": np.ones((n_features + 1, n_features + 1)), + "diag": np.ones((n_components + 1, n_features)), + "spherical": np.ones((n_components + 1)), + } + + # Define not positive-definite precisions + precisions_not_pos = np.ones((n_components, n_features, n_features)) + precisions_not_pos[0] = np.eye(n_features) + precisions_not_pos[0, 0, 0] = -1.0 + + precisions_not_positive = { + "full": precisions_not_pos, + "tied": precisions_not_pos[0], + "diag": np.full((n_components, n_features), -1.0), + "spherical": np.full(n_components, -1.0), + } + + not_positive_errors = { + "full": "symmetric, positive-definite", + "tied": "symmetric, positive-definite", + "diag": "positive", + "spherical": "positive", + } + + for covar_type in COVARIANCE_TYPE: + X = RandomData(rng).X[covar_type] + g = GaussianMixture( + n_components=n_components, covariance_type=covar_type, random_state=rng + ) + + # Check precisions with bad shapes + g.precisions_init = precisions_bad_shape[covar_type] + msg = f"The parameter '{covar_type} precision' should have the shape of" + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check not positive precisions + g.precisions_init = precisions_not_positive[covar_type] + msg = f"'{covar_type} precision' should be {not_positive_errors[covar_type]}" + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check the correct init of precisions_init + g.precisions_init = rand_data.precisions[covar_type] + g.fit(X) + assert_array_equal(rand_data.precisions[covar_type], g.precisions_init) + + +def test_suffstat_sk_full(): + # compare the precision matrix compute from the + # EmpiricalCovariance.covariance fitted on X*sqrt(resp) + # with _sufficient_sk_full, n_components=1 + rng = np.random.RandomState(0) + n_samples, n_features = 500, 2 + + # special case 1, assuming data is "centered" + X = rng.rand(n_samples, n_features) + resp = rng.rand(n_samples, 1) + X_resp = np.sqrt(resp) * X + nk = np.array([n_samples]) + xk = np.zeros((1, n_features)) + covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + ecov = EmpiricalCovariance(assume_centered=True) + ecov.fit(X_resp) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") + precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) + precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) + assert_array_almost_equal(precs_est, precs_pred) + + # special case 2, assuming resp are all ones + resp = np.ones((n_samples, 1)) + nk = np.array([n_samples]) + xk = X.mean(axis=0).reshape((1, -1)) + covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + ecov = EmpiricalCovariance(assume_centered=False) + ecov.fit(X) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") + precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) + precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) + assert_array_almost_equal(precs_est, 
precs_pred) + + +def test_suffstat_sk_tied(): + # use equation Nk * Sk / N = S_tied + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 500, 2, 2 + + resp = rng.rand(n_samples, n_components) + resp = resp / resp.sum(axis=1)[:, np.newaxis] + X = rng.rand(n_samples, n_features) + nk = resp.sum(axis=0) + xk = np.dot(resp.T, X) / nk[:, np.newaxis] + + covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + covars_pred_full = ( + np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full, 0) / n_samples + ) + + covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0) + + ecov = EmpiricalCovariance() + ecov.covariance_ = covars_pred_full + assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, "tied") + precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T) + precs_est = linalg.inv(covars_pred_tied) + assert_array_almost_equal(precs_est, precs_pred) + + +def test_suffstat_sk_diag(): + # test against 'full' case + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 500, 2, 2 + + resp = rng.rand(n_samples, n_components) + resp = resp / resp.sum(axis=1)[:, np.newaxis] + X = rng.rand(n_samples, n_features) + nk = resp.sum(axis=0) + xk = np.dot(resp.T, X) / nk[:, np.newaxis] + covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0) + + ecov = EmpiricalCovariance() + for cov_full, cov_diag in zip(covars_pred_full, covars_pred_diag): + ecov.covariance_ = np.diag(np.diag(cov_full)) + cov_diag = np.diag(cov_diag) + assert_almost_equal(ecov.error_norm(cov_diag, norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(cov_diag, norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, "diag") + assert_almost_equal(covars_pred_diag, 1.0 / precs_chol_pred**2) + + +def test_gaussian_suffstat_sk_spherical(): + # computing spherical covariance equals to the variance of one-dimension + # data after flattening, n_components=1 + rng = np.random.RandomState(0) + n_samples, n_features = 500, 2 + + X = rng.rand(n_samples, n_features) + X = X - X.mean() + resp = np.ones((n_samples, 1)) + nk = np.array([n_samples]) + xk = X.mean() + covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0) + covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / ( + n_features * n_samples + ) + assert_almost_equal(covars_pred_spherical, covars_pred_spherical2) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, "spherical") + assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2) + + +def test_compute_log_det_cholesky(): + n_features = 2 + rand_data = RandomData(np.random.RandomState(0)) + + for covar_type in COVARIANCE_TYPE: + covariance = rand_data.covariances[covar_type] + + if covar_type == "full": + predected_det = np.array([linalg.det(cov) for cov in covariance]) + elif covar_type == "tied": + predected_det = linalg.det(covariance) + elif covar_type == "diag": + predected_det = np.array([np.prod(cov) for cov in covariance]) + elif covar_type == "spherical": + predected_det = covariance**n_features + + # We compute the cholesky decomposition of the covariance matrix + expected_det = 
_compute_log_det_cholesky( + _compute_precision_cholesky(covariance, covar_type), + covar_type, + n_features=n_features, + ) + assert_array_almost_equal(expected_det, -0.5 * np.log(predected_det)) + + +def _naive_lmvnpdf_diag(X, means, covars): + resp = np.empty((len(X), len(means))) + stds = np.sqrt(covars) + for i, (mean, std) in enumerate(zip(means, stds)): + resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1) + return resp + + +def test_gaussian_mixture_log_probabilities(): + from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob + + # test against with _naive_lmvnpdf_diag + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_samples = 500 + n_features = rand_data.n_features + n_components = rand_data.n_components + + means = rand_data.means + covars_diag = rng.rand(n_components, n_features) + X = rng.rand(n_samples, n_features) + log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag) + + # full covariances + precs_full = np.array([np.diag(1.0 / np.sqrt(x)) for x in covars_diag]) + + log_prob = _estimate_log_gaussian_prob(X, means, precs_full, "full") + assert_array_almost_equal(log_prob, log_prob_naive) + + # diag covariances + precs_chol_diag = 1.0 / np.sqrt(covars_diag) + log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, "diag") + assert_array_almost_equal(log_prob, log_prob_naive) + + # tied + covars_tied = np.array([x for x in covars_diag]).mean(axis=0) + precs_tied = np.diag(np.sqrt(1.0 / covars_tied)) + + log_prob_naive = _naive_lmvnpdf_diag(X, means, [covars_tied] * n_components) + log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, "tied") + + assert_array_almost_equal(log_prob, log_prob_naive) + + # spherical + covars_spherical = covars_diag.mean(axis=1) + precs_spherical = 1.0 / np.sqrt(covars_diag.mean(axis=1)) + log_prob_naive = _naive_lmvnpdf_diag( + X, means, [[k] * n_features for k in covars_spherical] + ) + log_prob = _estimate_log_gaussian_prob(X, means, precs_spherical, "spherical") + assert_array_almost_equal(log_prob, log_prob_naive) + + +# skip tests on weighted_log_probabilities, log_weights + + +def test_gaussian_mixture_estimate_log_prob_resp(): + # test whether responsibilities are normalized + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=5) + n_samples = rand_data.n_samples + n_features = rand_data.n_features + n_components = rand_data.n_components + + X = rng.rand(n_samples, n_features) + for covar_type in COVARIANCE_TYPE: + weights = rand_data.weights + means = rand_data.means + precisions = rand_data.precisions[covar_type] + g = GaussianMixture( + n_components=n_components, + random_state=rng, + weights_init=weights, + means_init=means, + precisions_init=precisions, + covariance_type=covar_type, + ) + g.fit(X) + resp = g.predict_proba(X) + assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples)) + assert_array_equal(g.weights_init, weights) + assert_array_equal(g.means_init, means) + assert_array_equal(g.precisions_init, precisions) + + +def test_gaussian_mixture_predict_predict_proba(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + g = GaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weights_init=rand_data.weights, + means_init=rand_data.means, + precisions_init=rand_data.precisions[covar_type], + covariance_type=covar_type, + ) + + # Check a warning message arrive if we don't do fit + msg = ( + "This GaussianMixture instance is 
not fitted yet. Call 'fit' " + "with appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + g.predict(X) + + g.fit(X) + Y_pred = g.predict(X) + Y_pred_proba = g.predict_proba(X).argmax(axis=1) + assert_array_equal(Y_pred, Y_pred_proba) + assert adjusted_rand_score(Y, Y_pred) > 0.95 + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize( + "seed, max_iter, tol", + [ + (0, 2, 1e-7), # strict non-convergence + (1, 2, 1e-1), # loose non-convergence + (3, 300, 1e-7), # strict convergence + (4, 300, 1e-1), # loose convergence + ], +) +def test_gaussian_mixture_fit_predict(seed, max_iter, tol): + rng = np.random.RandomState(seed) + rand_data = RandomData(rng) + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + g = GaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weights_init=rand_data.weights, + means_init=rand_data.means, + precisions_init=rand_data.precisions[covar_type], + covariance_type=covar_type, + max_iter=max_iter, + tol=tol, + ) + + # check if fit_predict(X) is equivalent to fit(X).predict(X) + f = copy.deepcopy(g) + Y_pred1 = f.fit(X).predict(X) + Y_pred2 = g.fit_predict(X) + assert_array_equal(Y_pred1, Y_pred2) + assert adjusted_rand_score(Y, Y_pred2) > 0.95 + + +def test_gaussian_mixture_fit_predict_n_init(): + # Check that fit_predict is equivalent to fit.predict, when n_init > 1 + X = np.random.RandomState(0).randn(1000, 5) + gm = GaussianMixture(n_components=5, n_init=5, random_state=0) + y_pred1 = gm.fit_predict(X) + y_pred2 = gm.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +def test_gaussian_mixture_fit(): + # recover the ground truth + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_features = rand_data.n_features + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=20, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + g.fit(X) + + # needs more data to pass the test with rtol=1e-7 + assert_allclose( + np.sort(g.weights_), np.sort(rand_data.weights), rtol=0.1, atol=1e-2 + ) + + arg_idx1 = g.means_[:, 0].argsort() + arg_idx2 = rand_data.means[:, 0].argsort() + assert_allclose( + g.means_[arg_idx1], rand_data.means[arg_idx2], rtol=0.1, atol=1e-2 + ) + + if covar_type == "full": + prec_pred = g.precisions_ + prec_test = rand_data.precisions["full"] + elif covar_type == "tied": + prec_pred = np.array([g.precisions_] * n_components) + prec_test = np.array([rand_data.precisions["tied"]] * n_components) + elif covar_type == "spherical": + prec_pred = np.array([np.eye(n_features) * c for c in g.precisions_]) + prec_test = np.array( + [np.eye(n_features) * c for c in rand_data.precisions["spherical"]] + ) + elif covar_type == "diag": + prec_pred = np.array([np.diag(d) for d in g.precisions_]) + prec_test = np.array([np.diag(d) for d in rand_data.precisions["diag"]]) + + arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort() + arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort() + for k, h in zip(arg_idx1, arg_idx2): + ecov = EmpiricalCovariance() + ecov.covariance_ = prec_test[h] + # the accuracy depends on the number of data and randomness, rng + assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15) + + +def test_gaussian_mixture_fit_best_params(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + n_init = 
10 + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + ll = [] + for _ in range(n_init): + g.fit(X) + ll.append(g.score(X)) + ll = np.array(ll) + g_best = GaussianMixture( + n_components=n_components, + n_init=n_init, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + g_best.fit(X) + assert_almost_equal(ll.min(), g_best.score(X)) + + +def test_gaussian_mixture_fit_convergence_warning(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=1) + n_components = rand_data.n_components + max_iter = 1 + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=max_iter, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + f"Initialization {max_iter} did not converge. Try different init " + "parameters, or increase max_iter, tol or check for degenerate" + " data." + ) + with pytest.warns(ConvergenceWarning, match=msg): + g.fit(X) + + +def test_multiple_init(): + # Test that multiple inits does not much worse than a single one + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 5, 2 + X = rng.randn(n_samples, n_features) + for cv_type in COVARIANCE_TYPE: + train1 = ( + GaussianMixture( + n_components=n_components, covariance_type=cv_type, random_state=0 + ) + .fit(X) + .score(X) + ) + train2 = ( + GaussianMixture( + n_components=n_components, + covariance_type=cv_type, + random_state=0, + n_init=5, + ) + .fit(X) + .score(X) + ) + assert train2 >= train1 + + +def test_gaussian_mixture_n_parameters(): + # Test that the right number of parameters is estimated + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 5, 2 + X = rng.randn(n_samples, n_features) + n_params = {"spherical": 13, "diag": 21, "tied": 26, "full": 41} + for cv_type in COVARIANCE_TYPE: + g = GaussianMixture( + n_components=n_components, covariance_type=cv_type, random_state=rng + ).fit(X) + assert g._n_parameters() == n_params[cv_type] + + +def test_bic_1d_1component(): + # Test all of the covariance_types return the same BIC score for + # 1-dimensional, 1 component fits. 
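+    # With one component in one dimension every covariance_type reduces to a
+    # single variance parameter, so the fitted log-likelihood and _n_parameters()
+    # coincide and the BIC must be identical for all of them, e.g. the covariance
+    # parameter counts are full: 1 * 1 * (1 + 1) / 2 = 1, tied: 1 * (1 + 1) / 2 = 1,
+    # diag: 1 * 1 = 1, spherical: 1.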
+ rng = np.random.RandomState(0) + n_samples, n_dim, n_components = 100, 1, 1 + X = rng.randn(n_samples, n_dim) + bic_full = ( + GaussianMixture( + n_components=n_components, covariance_type="full", random_state=rng + ) + .fit(X) + .bic(X) + ) + for covariance_type in ["tied", "diag", "spherical"]: + bic = ( + GaussianMixture( + n_components=n_components, + covariance_type=covariance_type, + random_state=rng, + ) + .fit(X) + .bic(X) + ) + assert_almost_equal(bic_full, bic) + + +def test_gaussian_mixture_aic_bic(): + # Test the aic and bic criteria + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 3, 2 + X = rng.randn(n_samples, n_features) + # standard gaussian entropy + sgh = 0.5 * ( + fast_logdet(np.cov(X.T, bias=1)) + n_features * (1 + np.log(2 * np.pi)) + ) + for cv_type in COVARIANCE_TYPE: + g = GaussianMixture( + n_components=n_components, + covariance_type=cv_type, + random_state=rng, + max_iter=200, + ) + g.fit(X) + aic = 2 * n_samples * sgh + 2 * g._n_parameters() + bic = 2 * n_samples * sgh + np.log(n_samples) * g._n_parameters() + bound = n_features / np.sqrt(n_samples) + assert (g.aic(X) - aic) / n_samples < bound + assert (g.bic(X) - bic) / n_samples < bound + + +def test_gaussian_mixture_verbose(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + verbose=1, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + verbose=2, + ) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + g.fit(X) + h.fit(X) + finally: + sys.stdout = old_stdout + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize("seed", (0, 1, 2)) +def test_warm_start(seed): + random_state = seed + rng = np.random.RandomState(random_state) + n_samples, n_features, n_components = 500, 2, 2 + X = rng.rand(n_samples, n_features) + + # Assert the warm_start give the same result for the same number of iter + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=2, + reg_covar=0, + random_state=random_state, + warm_start=False, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=1, + reg_covar=0, + random_state=random_state, + warm_start=True, + ) + + g.fit(X) + score1 = h.fit(X).score(X) + score2 = h.fit(X).score(X) + + assert_almost_equal(g.weights_, h.weights_) + assert_almost_equal(g.means_, h.means_) + assert_almost_equal(g.precisions_, h.precisions_) + assert score2 > score1 + + # Assert that by using warm_start we can converge to a good solution + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=5, + reg_covar=0, + random_state=random_state, + warm_start=False, + tol=1e-6, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=5, + reg_covar=0, + random_state=random_state, + warm_start=True, + tol=1e-6, + ) + + g.fit(X) + assert not g.converged_ + + h.fit(X) + # depending on the data there is large variability in the number of + # refit necessary to converge due to the complete randomness of the + # data + for _ in range(1000): + h.fit(X) + if h.converged_: + break + assert h.converged_ + + +@ignore_warnings(category=ConvergenceWarning) +def test_convergence_detected_with_warm_start(): + # We check that convergence is detected 
when warm_start=True + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + X = rand_data.X["full"] + + for max_iter in (1, 2, 50): + gmm = GaussianMixture( + n_components=n_components, + warm_start=True, + max_iter=max_iter, + random_state=rng, + ) + for _ in range(100): + gmm.fit(X) + if gmm.converged_: + break + assert gmm.converged_ + assert max_iter >= gmm.n_iter_ + + +def test_score(): + covar_type = "full" + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + X = rand_data.X[covar_type] + + # Check the error message if we don't call fit + gmm1 = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + "This GaussianMixture instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + gmm1.score(X) + + # Check score value + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + gmm1.fit(X) + gmm_score = gmm1.score(X) + gmm_score_proba = gmm1.score_samples(X).mean() + assert_almost_equal(gmm_score, gmm_score_proba) + + # Check if the score increase + gmm2 = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ).fit(X) + assert gmm2.score(X) > gmm1.score(X) + + +def test_score_samples(): + covar_type = "full" + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + X = rand_data.X[covar_type] + + # Check the error message if we don't call fit + gmm = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + "This GaussianMixture instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + gmm.score_samples(X) + + gmm_score_samples = gmm.fit(X).score_samples(X) + assert gmm_score_samples.shape[0] == rand_data.n_samples + + +def test_monotonic_likelihood(): + # We check that each step of the EM without regularization improve + # monotonically the training set likelihood + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + gmm = GaussianMixture( + n_components=n_components, + covariance_type=covar_type, + reg_covar=0, + warm_start=True, + max_iter=1, + random_state=rng, + tol=1e-7, + ) + current_log_likelihood = -np.inf + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + # Do one training iteration at a time so we can make sure that the + # training log likelihood increases after each iteration. + for _ in range(600): + prev_log_likelihood = current_log_likelihood + current_log_likelihood = gmm.fit(X).score(X) + assert current_log_likelihood >= prev_log_likelihood + + if gmm.converged_: + break + + assert gmm.converged_ + + +def test_regularisation(): + # We train the GaussianMixture on degenerate data by defining two clusters + # of a 0 covariance. 
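+    # With reg_covar=0 and one component per sample the empirical covariance of
+    # each component collapses to zero, so computing its Cholesky factor fails and
+    # fit() must raise; adding a small positive reg_covar to the diagonal fixes it.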
+ rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = np.vstack( + (np.ones((n_samples // 2, n_features)), np.zeros((n_samples // 2, n_features))) + ) + + for covar_type in COVARIANCE_TYPE: + gmm = GaussianMixture( + n_components=n_samples, + reg_covar=0, + covariance_type=covar_type, + random_state=rng, + ) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + msg = re.escape( + "Fitting the mixture model failed because some components have" + " ill-defined empirical covariance (for instance caused by " + "singleton or collapsed samples). Try to decrease the number " + "of components, or increase reg_covar." + ) + with pytest.raises(ValueError, match=msg): + gmm.fit(X) + + gmm.set_params(reg_covar=1e-6).fit(X) + + +def test_property(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + gmm = GaussianMixture( + n_components=n_components, + covariance_type=covar_type, + random_state=rng, + n_init=5, + ) + gmm.fit(X) + if covar_type == "full": + for prec, covar in zip(gmm.precisions_, gmm.covariances_): + assert_array_almost_equal(linalg.inv(prec), covar) + elif covar_type == "tied": + assert_array_almost_equal(linalg.inv(gmm.precisions_), gmm.covariances_) + else: + assert_array_almost_equal(gmm.precisions_, 1.0 / gmm.covariances_) + + +def test_sample(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7, n_components=3) + n_features, n_components = rand_data.n_features, rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + + gmm = GaussianMixture( + n_components=n_components, covariance_type=covar_type, random_state=rng + ) + # To sample we need that GaussianMixture is fitted + msg = "This GaussianMixture instance is not fitted" + with pytest.raises(NotFittedError, match=msg): + gmm.sample(0) + gmm.fit(X) + + msg = "Invalid value for 'n_samples'" + with pytest.raises(ValueError, match=msg): + gmm.sample(0) + + # Just to make sure the class samples correctly + n_samples = 20000 + X_s, y_s = gmm.sample(n_samples) + + for k in range(n_components): + if covar_type == "full": + assert_array_almost_equal( + gmm.covariances_[k], np.cov(X_s[y_s == k].T), decimal=1 + ) + elif covar_type == "tied": + assert_array_almost_equal( + gmm.covariances_, np.cov(X_s[y_s == k].T), decimal=1 + ) + elif covar_type == "diag": + assert_array_almost_equal( + gmm.covariances_[k], np.diag(np.cov(X_s[y_s == k].T)), decimal=1 + ) + else: + assert_array_almost_equal( + gmm.covariances_[k], + np.var(X_s[y_s == k] - gmm.means_[k]), + decimal=1, + ) + + means_s = np.array([np.mean(X_s[y_s == k], 0) for k in range(n_components)]) + assert_array_almost_equal(gmm.means_, means_s, decimal=1) + + # Check shapes of sampled data, see + # https://github.com/scikit-learn/scikit-learn/issues/7701 + assert X_s.shape == (n_samples, n_features) + + for sample_size in range(1, 100): + X_s, _ = gmm.sample(sample_size) + assert X_s.shape == (sample_size, n_features) + + +@ignore_warnings(category=ConvergenceWarning) +def test_init(): + # We check that by increasing the n_init number we have a better solution + for random_state in range(15): + rand_data = RandomData( + np.random.RandomState(random_state), n_samples=50, scale=1 + ) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm1 = GaussianMixture( + n_components=n_components, n_init=1, max_iter=1, 
random_state=random_state + ).fit(X) + gmm2 = GaussianMixture( + n_components=n_components, n_init=10, max_iter=1, random_state=random_state + ).fit(X) + + assert gmm2.lower_bound_ >= gmm1.lower_bound_ + + +def test_gaussian_mixture_setting_best_params(): + """`GaussianMixture`'s best_parameters, `n_iter_` and `lower_bound_` + must be set appropriately in the case of divergence. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/18216 + """ + rnd = np.random.RandomState(0) + n_samples = 30 + X = rnd.uniform(size=(n_samples, 3)) + + # following initialization parameters were found to lead to divergence + means_init = np.array( + [ + [0.670637869618158, 0.21038256107384043, 0.12892629765485303], + [0.09394051075844147, 0.5759464955561779, 0.929296197576212], + [0.5033230372781258, 0.9569852381759425, 0.08654043447295741], + [0.18578301420435747, 0.5531158970919143, 0.19388943970532435], + [0.4548589928173794, 0.35182513658825276, 0.568146063202464], + [0.609279894978321, 0.7929063819678847, 0.9620097270828052], + ] + ) + precisions_init = np.array( + [ + 999999.999604483, + 999999.9990869573, + 553.7603944542167, + 204.78596008931834, + 15.867423501783637, + 85.4595728389735, + ] + ) + weights_init = [ + 0.03333333333333341, + 0.03333333333333341, + 0.06666666666666674, + 0.06666666666666674, + 0.7000000000000001, + 0.10000000000000007, + ] + + gmm = GaussianMixture( + covariance_type="spherical", + reg_covar=0, + means_init=means_init, + weights_init=weights_init, + random_state=rnd, + n_components=len(weights_init), + precisions_init=precisions_init, + max_iter=1, + ) + # ensure that no error is thrown during fit + gmm.fit(X) + + # check that the fit did not converge + assert not gmm.converged_ + + # check that parameters are set for gmm + for attr in [ + "weights_", + "means_", + "covariances_", + "precisions_cholesky_", + "n_iter_", + "lower_bound_", + ]: + assert hasattr(gmm, attr) + + +@pytest.mark.parametrize( + "init_params", ["random", "random_from_data", "k-means++", "kmeans"] +) +def test_init_means_not_duplicated(init_params, global_random_seed): + # Check that all initialisations provide not duplicated starting means + rng = np.random.RandomState(global_random_seed) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm = GaussianMixture( + n_components=n_components, init_params=init_params, random_state=rng, max_iter=0 + ) + gmm.fit(X) + + means = gmm.means_ + for i_mean, j_mean in itertools.combinations(means, r=2): + assert not np.allclose(i_mean, j_mean) + + +@pytest.mark.parametrize( + "init_params", ["random", "random_from_data", "k-means++", "kmeans"] +) +def test_means_for_all_inits(init_params, global_random_seed): + # Check fitted means properties for all initializations + rng = np.random.RandomState(global_random_seed) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm = GaussianMixture( + n_components=n_components, init_params=init_params, random_state=rng + ) + gmm.fit(X) + + assert gmm.means_.shape == (n_components, X.shape[1]) + assert np.all(X.min(axis=0) <= gmm.means_) + assert np.all(gmm.means_ <= X.max(axis=0)) + assert gmm.converged_ + + +def test_max_iter_zero(): + # Check that max_iter=0 returns initialisation as expected + # Pick arbitrary initial means and check equal to max_iter=0 + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = 
rand_data.X["full"] + means_init = [[20, 30], [30, 25]] + gmm = GaussianMixture( + n_components=n_components, + random_state=rng, + means_init=means_init, + tol=1e-06, + max_iter=0, + ) + gmm.fit(X) + + assert_allclose(gmm.means_, means_init) + + +def test_gaussian_mixture_precisions_init_diag(): + """Check that we properly initialize `precision_cholesky_` when we manually + provide the precision matrix. + + In this regard, we check the consistency between estimating the precision + matrix and providing the same precision matrix as initialization. It should + lead to the same results with the same number of iterations. + + If the initialization is wrong then the number of iterations will increase. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/16944 + """ + # generate a toy dataset + n_samples = 300 + rng = np.random.RandomState(0) + shifted_gaussian = rng.randn(n_samples, 2) + np.array([20, 20]) + C = np.array([[0.0, -0.7], [3.5, 0.7]]) + stretched_gaussian = np.dot(rng.randn(n_samples, 2), C) + X = np.vstack([shifted_gaussian, stretched_gaussian]) + + # common parameters to check the consistency of precision initialization + n_components, covariance_type, reg_covar, random_state = 2, "diag", 1e-6, 0 + + # execute the manual initialization to compute the precision matrix: + # - run KMeans to have an initial guess + # - estimate the covariance + # - compute the precision matrix from the estimated covariance + resp = np.zeros((X.shape[0], n_components)) + label = ( + KMeans(n_clusters=n_components, n_init=1, random_state=random_state) + .fit(X) + .labels_ + ) + resp[np.arange(X.shape[0]), label] = 1 + _, _, covariance = _estimate_gaussian_parameters( + X, resp, reg_covar=reg_covar, covariance_type=covariance_type + ) + precisions_init = 1 / covariance + + gm_with_init = GaussianMixture( + n_components=n_components, + covariance_type=covariance_type, + reg_covar=reg_covar, + precisions_init=precisions_init, + random_state=random_state, + ).fit(X) + + gm_without_init = GaussianMixture( + n_components=n_components, + covariance_type=covariance_type, + reg_covar=reg_covar, + random_state=random_state, + ).fit(X) + + assert gm_without_init.n_iter_ == gm_with_init.n_iter_ + assert_allclose( + gm_with_init.precisions_cholesky_, gm_without_init.precisions_cholesky_ + ) + + +def _generate_data(seed, n_samples, n_features, n_components): + """Randomly generate samples and responsibilities.""" + rs = np.random.RandomState(seed) + X = rs.random_sample((n_samples, n_features)) + resp = rs.random_sample((n_samples, n_components)) + resp /= resp.sum(axis=1)[:, np.newaxis] + return X, resp + + +def _calculate_precisions(X, resp, covariance_type): + """Calculate precision matrix of X and its Cholesky decomposition + for the given covariance type. + """ + reg_covar = 1e-6 + weights, means, covariances = _estimate_gaussian_parameters( + X, resp, reg_covar, covariance_type + ) + precisions_cholesky = _compute_precision_cholesky(covariances, covariance_type) + + _, n_components = resp.shape + # Instantiate a `GaussianMixture` model in order to use its + # `_set_parameters` method to return the `precisions_` and + # `precisions_cholesky_` from matching the `covariance_type` + # provided. 
+    gmm = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
+    params = (weights, means, covariances, precisions_cholesky)
+    gmm._set_parameters(params)
+    return gmm.precisions_, gmm.precisions_cholesky_
+
+
+@pytest.mark.parametrize("covariance_type", COVARIANCE_TYPE)
+def test_gaussian_mixture_precisions_init(covariance_type, global_random_seed):
+    """Non-regression test for #26415."""
+
+    X, resp = _generate_data(
+        seed=global_random_seed,
+        n_samples=100,
+        n_features=3,
+        n_components=4,
+    )
+
+    precisions_init, desired_precisions_cholesky = _calculate_precisions(
+        X, resp, covariance_type
+    )
+    gmm = GaussianMixture(
+        covariance_type=covariance_type, precisions_init=precisions_init
+    )
+    gmm._initialize(X, resp)
+    actual_precisions_cholesky = gmm.precisions_cholesky_
+    assert_allclose(actual_precisions_cholesky, desired_precisions_cholesky)
+
+
+def test_gaussian_mixture_single_component_stable():
+    """
+    Non-regression test for #23032 ensuring 1-component GM works on only a
+    few samples.
+    """
+    rng = np.random.RandomState(0)
+    X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3)
+    gm = GaussianMixture(n_components=1)
+    gm.fit(X).sample()
+
+
+def test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters(
+    monkeypatch,
+    global_random_seed,
+):
+    """When all init parameters are provided, the Gaussian parameters
+    are not estimated.
+
+    Non-regression test for gh-26015.
+    """
+
+    mock = Mock(side_effect=_estimate_gaussian_parameters)
+    monkeypatch.setattr(
+        sklearn.mixture._gaussian_mixture, "_estimate_gaussian_parameters", mock
+    )
+
+    rng = np.random.RandomState(global_random_seed)
+    rand_data = RandomData(rng)
+
+    gm = GaussianMixture(
+        n_components=rand_data.n_components,
+        weights_init=rand_data.weights,
+        means_init=rand_data.means,
+        precisions_init=rand_data.precisions["full"],
+        random_state=rng,
+    )
+    gm.fit(rand_data.X["full"])
+    # The user-provided initial Gaussian parameters are not re-estimated;
+    # _estimate_gaussian_parameters is only called once per M-step during fit.
+    assert mock.call_count == gm.n_iter_
diff --git a/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0ea3494f0e7d086968d3f9ff7eac0ecdcf51a96
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py
@@ -0,0 +1,30 @@
+# Author: Guillaume Lemaitre
+# License: BSD 3 clause
+
+import numpy as np
+import pytest
+
+from sklearn.mixture import BayesianGaussianMixture, GaussianMixture
+
+
+@pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()])
+def test_gaussian_mixture_n_iter(estimator):
+    # check that n_iter_ is the number of iterations performed.
+    rng = np.random.RandomState(0)
+    X = rng.rand(10, 5)
+    max_iter = 1
+    estimator.set_params(max_iter=max_iter)
+    estimator.fit(X)
+    assert estimator.n_iter_ == max_iter
+
+
+@pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()])
+def test_mixture_n_components_greater_than_n_samples_error(estimator):
+    """Check the error raised when n_components > n_samples."""
+    rng = np.random.RandomState(0)
+    X = rng.rand(10, 5)
+    estimator.set_params(n_components=12)
+
+    msg = "Expected n_samples >= n_components"
+    with pytest.raises(ValueError, match=msg):
+        estimator.fit(X)
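+
+
+# The helper below is a minimal usage sketch rather than a test of the library:
+# it illustrates how the `bic` method documented on `GaussianMixture` (lower is
+# better) can drive the choice of `n_components`. The two-blob synthetic dataset
+# and the 1-4 component range are assumptions made purely for illustration.
+def _example_select_n_components_with_bic():
+    rng = np.random.RandomState(0)
+    # Two well separated Gaussian blobs of 100 points each in 2-D.
+    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5])
+
+    bic_per_k = {}
+    for n_components in range(1, 5):
+        gm = GaussianMixture(n_components=n_components, random_state=0).fit(X)
+        bic_per_k[n_components] = gm.bic(X)
+
+    # The clear two-cluster structure should make n_components=2 the minimiser.
+    return min(bic_per_k, key=bic_per_k.get)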