diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8fcf8c68444e5e97b45520110b7c9c5ac38a62c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__init__.py @@ -0,0 +1,44 @@ +""" +The :mod:`sklearn.covariance` module includes methods and algorithms to +robustly estimate the covariance of features given a set of points. The +precision matrix defined as the inverse of the covariance is also estimated. +Covariance estimation is closely related to the theory of Gaussian Graphical +Models. +""" + +from ._elliptic_envelope import EllipticEnvelope +from ._empirical_covariance import ( + EmpiricalCovariance, + empirical_covariance, + log_likelihood, +) +from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso +from ._robust_covariance import MinCovDet, fast_mcd +from ._shrunk_covariance import ( + OAS, + LedoitWolf, + ShrunkCovariance, + ledoit_wolf, + ledoit_wolf_shrinkage, + oas, + shrunk_covariance, +) + +__all__ = [ + "EllipticEnvelope", + "EmpiricalCovariance", + "GraphicalLasso", + "GraphicalLassoCV", + "LedoitWolf", + "MinCovDet", + "OAS", + "ShrunkCovariance", + "empirical_covariance", + "fast_mcd", + "graphical_lasso", + "ledoit_wolf", + "ledoit_wolf_shrinkage", + "log_likelihood", + "oas", + "shrunk_covariance", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..824dd6b82cce9f79d6920e48f260c541758e586c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00332edb49655d5de355b443c8b36c7c395fd603 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38df668ff1a81caaa61e400d2dae5d80fe8a5137 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..466b13ac969f377c540f833d7a67e6e1ddf2d978 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32965bcd57338fb0584a419edea028cc7cf4c068 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..db52bfa05ded30c7494e0ca3bbda4ddb6a37daa5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py @@ -0,0 +1,364 @@ +""" +Maximum likelihood covariance estimator. + +""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Virgile Fritsch +# +# License: BSD 3 clause + +# avoid division truncation +import warnings + +import numpy as np +from scipy import linalg + +from .. import config_context +from ..base import BaseEstimator, _fit_context +from ..metrics.pairwise import pairwise_distances +from ..utils import check_array +from ..utils._param_validation import validate_params +from ..utils.extmath import fast_logdet + + +@validate_params( + { + "emp_cov": [np.ndarray], + "precision": [np.ndarray], + }, + prefer_skip_nested_validation=True, +) +def log_likelihood(emp_cov, precision): + """Compute the sample mean of the log_likelihood under a covariance model. + + Computes the empirical expected log-likelihood, allowing for universal + comparison (beyond this software package), and accounts for normalization + terms and scaling. + + Parameters + ---------- + emp_cov : ndarray of shape (n_features, n_features) + Maximum Likelihood Estimator of covariance. + + precision : ndarray of shape (n_features, n_features) + The precision matrix of the covariance model to be tested. + + Returns + ------- + log_likelihood_ : float + Sample mean of the log-likelihood. + """ + p = precision.shape[0] + log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision) + log_likelihood_ -= p * np.log(2 * np.pi) + log_likelihood_ /= 2.0 + return log_likelihood_ + + +@validate_params( + { + "X": ["array-like"], + "assume_centered": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def empirical_covariance(X, *, assume_centered=False): + """Compute the Maximum likelihood covariance estimator. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If `True`, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If `False`, data will be centered before computation. + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + Empirical covariance (Maximum Likelihood Estimator). + + Examples + -------- + >>> from sklearn.covariance import empirical_covariance + >>> X = [[1,1,1],[1,1,1],[1,1,1], + ... [0,0,0],[0,0,0],[0,0,0]] + >>> empirical_covariance(X) + array([[0.25, 0.25, 0.25], + [0.25, 0.25, 0.25], + [0.25, 0.25, 0.25]]) + """ + X = check_array(X, ensure_2d=False, force_all_finite=False) + + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + + if X.shape[0] == 1: + warnings.warn( + "Only one sample available. 
You may want to reshape your data array" + ) + + if assume_centered: + covariance = np.dot(X.T, X) / X.shape[0] + else: + covariance = np.cov(X.T, bias=1) + + if covariance.ndim == 0: + covariance = np.array([[covariance]]) + return covariance + + +class EmpiricalCovariance(BaseEstimator): + """Maximum likelihood covariance estimator. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specifies if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo-inverse matrix. + (stored only if store_precision is True) + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import EmpiricalCovariance + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=500) + >>> cov = EmpiricalCovariance().fit(X) + >>> cov.covariance_ + array([[0.7569..., 0.2818...], + [0.2818..., 0.3928...]]) + >>> cov.location_ + array([0.0622..., 0.0193...]) + """ + + _parameter_constraints: dict = { + "store_precision": ["boolean"], + "assume_centered": ["boolean"], + } + + def __init__(self, *, store_precision=True, assume_centered=False): + self.store_precision = store_precision + self.assume_centered = assume_centered + + def _set_covariance(self, covariance): + """Saves the covariance and precision estimates + + Storage is done accordingly to `self.store_precision`. + Precision stored only if invertible. + + Parameters + ---------- + covariance : array-like of shape (n_features, n_features) + Estimated covariance matrix to be stored, and from which precision + is computed. + """ + covariance = check_array(covariance) + # set covariance + self.covariance_ = covariance + # set precision + if self.store_precision: + self.precision_ = linalg.pinvh(covariance, check_finite=False) + else: + self.precision_ = None + + def get_precision(self): + """Getter for the precision matrix. + + Returns + ------- + precision_ : array-like of shape (n_features, n_features) + The precision matrix associated to the current covariance object. 
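A minimal sketch of the covariance/precision relationship handled by `_set_covariance` above (it stores `linalg.pinvh(covariance)` when `store_precision=True`), using assumed synthetic data; the functional and estimator routes should agree up to numerical tolerance.

import numpy as np
from scipy import linalg
from sklearn.covariance import EmpiricalCovariance, empirical_covariance

rng = np.random.RandomState(0)
X = rng.randn(100, 3)

# Function route: MLE covariance, then pseudo-inverse for the precision.
cov = empirical_covariance(X, assume_centered=False)
prec = linalg.pinvh(cov)

# Estimator route: precision_ is stored because store_precision=True (default).
est = EmpiricalCovariance().fit(X)

print(np.allclose(cov, est.covariance_))   # True
print(np.allclose(prec, est.precision_))   # True, up to numerical tolerance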
+ """ + if self.store_precision: + precision = self.precision_ + else: + precision = linalg.pinvh(self.covariance_, check_finite=False) + return precision + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the maximum likelihood covariance estimator to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance = empirical_covariance(X, assume_centered=self.assume_centered) + self._set_covariance(covariance) + + return self + + def score(self, X_test, y=None): + """Compute the log-likelihood of `X_test` under the estimated Gaussian model. + + The Gaussian model is defined by its mean and covariance matrix which are + represented respectively by `self.location_` and `self.covariance_`. + + Parameters + ---------- + X_test : array-like of shape (n_samples, n_features) + Test data of which we compute the likelihood, where `n_samples` is + the number of samples and `n_features` is the number of features. + `X_test` is assumed to be drawn from the same distribution than + the data used in fit (including centering). + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + res : float + The log-likelihood of `X_test` with `self.location_` and `self.covariance_` + as estimators of the Gaussian model mean and covariance matrix respectively. + """ + X_test = self._validate_data(X_test, reset=False) + # compute empirical covariance of the test set + test_cov = empirical_covariance(X_test - self.location_, assume_centered=True) + # compute log likelihood + res = log_likelihood(test_cov, self.get_precision()) + + return res + + def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True): + """Compute the Mean Squared Error between two covariance estimators. + + Parameters + ---------- + comp_cov : array-like of shape (n_features, n_features) + The covariance to compare with. + + norm : {"frobenius", "spectral"}, default="frobenius" + The type of norm used to compute the error. Available error types: + - 'frobenius' (default): sqrt(tr(A^t.A)) + - 'spectral': sqrt(max(eigenvalues(A^t.A)) + where A is the error ``(comp_cov - self.covariance_)``. + + scaling : bool, default=True + If True (default), the squared error norm is divided by n_features. + If False, the squared error norm is not rescaled. + + squared : bool, default=True + Whether to compute the squared error norm or the error norm. + If True (default), the squared error norm is returned. + If False, the error norm is returned. + + Returns + ------- + result : float + The Mean Squared Error (in the sense of the Frobenius norm) between + `self` and `comp_cov` covariance estimators. 
+ """ + # compute the error + error = comp_cov - self.covariance_ + # compute the error norm + if norm == "frobenius": + squared_norm = np.sum(error**2) + elif norm == "spectral": + squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error))) + else: + raise NotImplementedError( + "Only spectral and frobenius norms are implemented" + ) + # optionally scale the error norm + if scaling: + squared_norm = squared_norm / error.shape[0] + # finally get either the squared norm or the norm + if squared: + result = squared_norm + else: + result = np.sqrt(squared_norm) + + return result + + def mahalanobis(self, X): + """Compute the squared Mahalanobis distances of given observations. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The observations, the Mahalanobis distances of the which we + compute. Observations are assumed to be drawn from the same + distribution than the data used in fit. + + Returns + ------- + dist : ndarray of shape (n_samples,) + Squared Mahalanobis distances of the observations. + """ + X = self._validate_data(X, reset=False) + + precision = self.get_precision() + with config_context(assume_finite=True): + # compute mahalanobis distances + dist = pairwise_distances( + X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision + ) + + return np.reshape(dist, (len(X),)) ** 2 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py new file mode 100644 index 0000000000000000000000000000000000000000..fb40ffda162a4c0f31fc82c9124daff9bb4ecbb2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py @@ -0,0 +1,1110 @@ +"""GraphicalLasso: sparse inverse covariance estimation with an l1-penalized +estimator. +""" + +# Author: Gael Varoquaux +# License: BSD 3 clause +# Copyright: INRIA +import operator +import sys +import time +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import _fit_context +from ..exceptions import ConvergenceWarning + +# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast' +from ..linear_model import _cd_fast as cd_fast # type: ignore +from ..linear_model import lars_path_gram +from ..model_selection import check_cv, cross_val_score +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.metadata_routing import _RoutingNotSupportedMixin +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _is_arraylike_not_scalar, + check_random_state, + check_scalar, +) +from . import EmpiricalCovariance, empirical_covariance, log_likelihood + + +# Helper functions to compute the objective and dual objective functions +# of the l1-penalized estimator +def _objective(mle, precision_, alpha): + """Evaluation of the graphical-lasso objective function + + the objective function is made of a shifted scaled version of the + normalized log-likelihood (i.e. its empirical mean over the samples) and a + penalisation term to promote sparsity + """ + p = precision_.shape[0] + cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi) + cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) + return cost + + +def _dual_gap(emp_cov, precision_, alpha): + """Expression of the dual gap convergence criterion + + The specific definition is given in Duchi "Projected Subgradient Methods + for Learning Sparse Gaussians". 
+ """ + gap = np.sum(emp_cov * precision_) + gap -= precision_.shape[0] + gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) + return gap + + +# The g-lasso algorithm +def _graphical_lasso( + emp_cov, + alpha, + *, + cov_init=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, +): + _, n_features = emp_cov.shape + if alpha == 0: + # Early return without regularization + precision_ = linalg.inv(emp_cov) + cost = -2.0 * log_likelihood(emp_cov, precision_) + cost += n_features * np.log(2 * np.pi) + d_gap = np.sum(emp_cov * precision_) - n_features + return emp_cov, precision_, (cost, d_gap), 0 + + if cov_init is None: + covariance_ = emp_cov.copy() + else: + covariance_ = cov_init.copy() + # As a trivial regularization (Tikhonov like), we scale down the + # off-diagonal coefficients of our starting point: This is needed, as + # in the cross-validation the cov_init can easily be + # ill-conditioned, and the CV loop blows. Beside, this takes + # conservative stand-point on the initial conditions, and it tends to + # make the convergence go faster. + covariance_ *= 0.95 + diagonal = emp_cov.flat[:: n_features + 1] + covariance_.flat[:: n_features + 1] = diagonal + precision_ = linalg.pinvh(covariance_) + + indices = np.arange(n_features) + i = 0 # initialize the counter to be robust to `max_iter=0` + costs = list() + # The different l1 regression solver have different numerical errors + if mode == "cd": + errors = dict(over="raise", invalid="ignore") + else: + errors = dict(invalid="raise") + try: + # be robust to the max_iter=0 edge case, see: + # https://github.com/scikit-learn/scikit-learn/issues/4134 + d_gap = np.inf + # set a sub_covariance buffer + sub_covariance = np.copy(covariance_[1:, 1:], order="C") + for i in range(max_iter): + for idx in range(n_features): + # To keep the contiguous matrix `sub_covariance` equal to + # covariance_[indices != idx].T[indices != idx] + # we only need to update 1 column and 1 line when idx changes + if idx > 0: + di = idx - 1 + sub_covariance[di] = covariance_[di][indices != idx] + sub_covariance[:, di] = covariance_[:, di][indices != idx] + else: + sub_covariance[:] = covariance_[1:, 1:] + row = emp_cov[idx, indices != idx] + with np.errstate(**errors): + if mode == "cd": + # Use coordinate descent + coefs = -( + precision_[indices != idx, idx] + / (precision_[idx, idx] + 1000 * eps) + ) + coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram( + coefs, + alpha, + 0, + sub_covariance, + row, + row, + max_iter, + enet_tol, + check_random_state(None), + False, + ) + else: # mode == "lars" + _, _, coefs = lars_path_gram( + Xy=row, + Gram=sub_covariance, + n_samples=row.size, + alpha_min=alpha / (n_features - 1), + copy_Gram=True, + eps=eps, + method="lars", + return_path=False, + ) + # Update the precision matrix + precision_[idx, idx] = 1.0 / ( + covariance_[idx, idx] + - np.dot(covariance_[indices != idx, idx], coefs) + ) + precision_[indices != idx, idx] = -precision_[idx, idx] * coefs + precision_[idx, indices != idx] = -precision_[idx, idx] * coefs + coefs = np.dot(sub_covariance, coefs) + covariance_[idx, indices != idx] = coefs + covariance_[indices != idx, idx] = coefs + if not np.isfinite(precision_.sum()): + raise FloatingPointError( + "The system is too ill-conditioned for this solver" + ) + d_gap = _dual_gap(emp_cov, precision_, alpha) + cost = _objective(emp_cov, precision_, alpha) + if verbose: + print( + "[graphical_lasso] Iteration % 3i, cost % 3.2e, 
dual gap %.3e" + % (i, cost, d_gap) + ) + costs.append((cost, d_gap)) + if np.abs(d_gap) < tol: + break + if not np.isfinite(cost) and i > 0: + raise FloatingPointError( + "Non SPD result: the system is too ill-conditioned for this solver" + ) + else: + warnings.warn( + "graphical_lasso: did not converge after %i iteration: dual gap: %.3e" + % (max_iter, d_gap), + ConvergenceWarning, + ) + except FloatingPointError as e: + e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",) + raise e + + return covariance_, precision_, costs, i + 1 + + +def alpha_max(emp_cov): + """Find the maximum alpha for which there are some non-zeros off-diagonal. + + Parameters + ---------- + emp_cov : ndarray of shape (n_features, n_features) + The sample covariance matrix. + + Notes + ----- + This results from the bound for the all the Lasso that are solved + in GraphicalLasso: each time, the row of cov corresponds to Xy. As the + bound for alpha is given by `max(abs(Xy))`, the result follows. + """ + A = np.copy(emp_cov) + A.flat[:: A.shape[0] + 1] = 0 + return np.max(np.abs(A)) + + +@validate_params( + { + "emp_cov": ["array-like"], + "cov_init": ["array-like", None], + "return_costs": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def graphical_lasso( + emp_cov, + alpha, + *, + cov_init=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + return_costs=False, + eps=np.finfo(np.float64).eps, + return_n_iter=False, +): + """L1-penalized covariance estimator. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + graph_lasso has been renamed to graphical_lasso + + Parameters + ---------- + emp_cov : array-like of shape (n_features, n_features) + Empirical covariance from which to compute the covariance estimate. + + alpha : float + The regularization parameter: the higher alpha, the more + regularization, the sparser the inverse covariance. + Range is (0, inf]. + + cov_init : array of shape (n_features, n_features), default=None + The initial guess for the covariance. If None, then the empirical + covariance is used. + + .. deprecated:: 1.3 + `cov_init` is deprecated in 1.3 and will be removed in 1.5. + It currently has no effect. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + The maximum number of iterations. + + verbose : bool, default=False + If verbose is True, the objective function and dual gap are + printed at each iteration. + + return_costs : bool, default=False + If return_costs is True, the objective function and dual gap + at each iteration are returned. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. 
+ + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + The estimated covariance matrix. + + precision : ndarray of shape (n_features, n_features) + The estimated (sparse) precision matrix. + + costs : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + n_iter : int + Number of iterations. Returned only if `return_n_iter` is set to True. + + See Also + -------- + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with + cross-validated choice of the l1 penalty. + + Notes + ----- + The algorithm employed to solve this problem is the GLasso algorithm, + from the Friedman 2008 Biostatistics paper. It is the same algorithm + as in the R `glasso` package. + + One possible difference with the `glasso` R package is that the + diagonal coefficients are not penalized. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_spd_matrix + >>> from sklearn.covariance import empirical_covariance, graphical_lasso + >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42) + >>> rng = np.random.RandomState(42) + >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3) + >>> emp_cov = empirical_covariance(X, assume_centered=True) + >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05) + >>> emp_cov + array([[ 1.68..., 0.21..., -0.20...], + [ 0.21..., 0.22..., -0.08...], + [-0.20..., -0.08..., 0.23...]]) + """ + + if cov_init is not None: + warnings.warn( + ( + "The cov_init parameter is deprecated in 1.3 and will be removed in " + "1.5. It does not have any effect." + ), + FutureWarning, + ) + + model = GraphicalLasso( + alpha=alpha, + mode=mode, + covariance="precomputed", + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + verbose=verbose, + eps=eps, + assume_centered=True, + ).fit(emp_cov) + + output = [model.covariance_, model.precision_] + if return_costs: + output.append(model.costs_) + if return_n_iter: + output.append(model.n_iter_) + return tuple(output) + + +class BaseGraphicalLasso(EmpiricalCovariance): + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "tol": [Interval(Real, 0, None, closed="right")], + "enet_tol": [Interval(Real, 0, None, closed="right")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "mode": [StrOptions({"cd", "lars"})], + "verbose": ["verbose"], + "eps": [Interval(Real, 0, None, closed="both")], + } + _parameter_constraints.pop("store_precision") + + def __init__( + self, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + mode="cd", + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__(assume_centered=assume_centered) + self.tol = tol + self.enet_tol = enet_tol + self.max_iter = max_iter + self.mode = mode + self.verbose = verbose + self.eps = eps + + +class GraphicalLasso(BaseGraphicalLasso): + """Sparse inverse covariance estimation with an l1-penalized estimator. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + GraphLasso has been renamed to GraphicalLasso + + Parameters + ---------- + alpha : float, default=0.01 + The regularization parameter: the higher alpha, the more + regularization, the sparser the inverse covariance. + Range is (0, inf]. 
+ + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + covariance : "precomputed", default=None + If covariance is "precomputed", the input data in `fit` is assumed + to be the covariance matrix. If `None`, the empirical covariance + is estimated from the data `X`. + + .. versionadded:: 1.3 + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + The maximum number of iterations. + + verbose : bool, default=False + If verbose is True, the objective function and dual gap are + plotted at each iteration. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + + n_iter_ : int + Number of iterations run. + + costs_ : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + .. versionadded:: 1.3 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + graphical_lasso : L1-penalized covariance estimator. + GraphicalLassoCV : Sparse inverse covariance with + cross-validated choice of the l1 penalty. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import GraphicalLasso + >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.2, 0.0, 0.3, 0.1], + ... [0.0, 0.0, 0.1, 0.7]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0], + ... cov=true_cov, + ... 
size=200) + >>> cov = GraphicalLasso().fit(X) + >>> np.around(cov.covariance_, decimals=3) + array([[0.816, 0.049, 0.218, 0.019], + [0.049, 0.364, 0.017, 0.034], + [0.218, 0.017, 0.322, 0.093], + [0.019, 0.034, 0.093, 0.69 ]]) + >>> np.around(cov.location_, decimals=3) + array([0.073, 0.04 , 0.038, 0.143]) + """ + + _parameter_constraints: dict = { + **BaseGraphicalLasso._parameter_constraints, + "alpha": [Interval(Real, 0, None, closed="both")], + "covariance": [StrOptions({"precomputed"}), None], + } + + def __init__( + self, + alpha=0.01, + *, + mode="cd", + covariance=None, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__( + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + mode=mode, + verbose=verbose, + eps=eps, + assume_centered=assume_centered, + ) + self.alpha = alpha + self.covariance = covariance + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the GraphicalLasso model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Covariance does not make sense for a single feature + X = self._validate_data(X, ensure_min_features=2, ensure_min_samples=2) + + if self.covariance == "precomputed": + emp_cov = X.copy() + self.location_ = np.zeros(X.shape[1]) + else: + emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + + self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( + emp_cov, + alpha=self.alpha, + cov_init=None, + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=self.max_iter, + verbose=self.verbose, + eps=self.eps, + ) + return self + + +# Cross-validation with GraphicalLasso +def graphical_lasso_path( + X, + alphas, + cov_init=None, + X_test=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, +): + """l1-penalized covariance estimator along a path of decreasing alphas + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + alphas : array-like of shape (n_alphas,) + The list of regularization parameters, decreasing order. + + cov_init : array of shape (n_features, n_features), default=None + The initial guess for the covariance. + + X_test : array of shape (n_test_samples, n_features), default=None + Optional test matrix to measure generalisation error. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. The tolerance must be a positive + number. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. The tolerance must be a positive number. 
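A hedged sketch of the `covariance="precomputed"` path handled in `fit` above: the estimator receives an empirical covariance matrix directly instead of raw samples. The data and the alpha value are assumptions for illustration.

import numpy as np
from sklearn.covariance import GraphicalLasso, empirical_covariance

rng = np.random.RandomState(0)
X = rng.randn(200, 4)

# Precompute the covariance once, then fit the l1-penalized estimator on it.
emp_cov = empirical_covariance(X, assume_centered=True)
model = GraphicalLasso(alpha=0.05, covariance="precomputed").fit(emp_cov)

print(model.covariance_.shape)  # (4, 4)
print(model.precision_.shape)   # (4, 4)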
+ + max_iter : int, default=100 + The maximum number of iterations. This parameter should be a strictly + positive integer. + + verbose : int or bool, default=False + The higher the verbosity flag, the more information is printed + during the fitting. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + Returns + ------- + covariances_ : list of shape (n_alphas,) of ndarray of shape \ + (n_features, n_features) + The estimated covariance matrices. + + precisions_ : list of shape (n_alphas,) of ndarray of shape \ + (n_features, n_features) + The estimated (sparse) precision matrices. + + scores_ : list of shape (n_alphas,), dtype=float + The generalisation error (log-likelihood) on the test data. + Returned only if test data is passed. + """ + inner_verbose = max(0, verbose - 1) + emp_cov = empirical_covariance(X) + if cov_init is None: + covariance_ = emp_cov.copy() + else: + covariance_ = cov_init + covariances_ = list() + precisions_ = list() + scores_ = list() + if X_test is not None: + test_emp_cov = empirical_covariance(X_test) + + for alpha in alphas: + try: + # Capture the errors, and move on + covariance_, precision_, _, _ = _graphical_lasso( + emp_cov, + alpha=alpha, + cov_init=covariance_, + mode=mode, + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + verbose=inner_verbose, + eps=eps, + ) + covariances_.append(covariance_) + precisions_.append(precision_) + if X_test is not None: + this_score = log_likelihood(test_emp_cov, precision_) + except FloatingPointError: + this_score = -np.inf + covariances_.append(np.nan) + precisions_.append(np.nan) + if X_test is not None: + if not np.isfinite(this_score): + this_score = -np.inf + scores_.append(this_score) + if verbose == 1: + sys.stderr.write(".") + elif verbose > 1: + if X_test is not None: + print( + "[graphical_lasso_path] alpha: %.2e, score: %.2e" + % (alpha, this_score) + ) + else: + print("[graphical_lasso_path] alpha: %.2e" % alpha) + if X_test is not None: + return covariances_, precisions_, scores_ + return covariances_, precisions_ + + +class GraphicalLassoCV(_RoutingNotSupportedMixin, BaseGraphicalLasso): + """Sparse inverse covariance w/ cross-validated choice of the l1 penalty. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + GraphLassoCV has been renamed to GraphicalLassoCV + + Parameters + ---------- + alphas : int or array-like of shape (n_alphas,), dtype=float, default=4 + If an integer is given, it fixes the number of points on the + grids of alpha to be used. If a list is given, it gives the + grid to be used. See the notes in the class docstring for + more details. Range is [1, inf) for an integer. + Range is (0, inf] for an array-like of floats. + + n_refinements : int, default=4 + The number of times the grid is refined. Not used if explicit + values of alphas are passed. Range is [1, inf). + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs :class:`~sklearn.model_selection.KFold` is used. 
+ + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.20 + ``cv`` default value if None changed from 3-fold to 5-fold. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + Maximum number of iterations. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where number of features is greater + than number of samples. Elsewhere prefer cd which is more numerically + stable. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + verbose : bool, default=False + If verbose is True, the objective function and duality gap are + printed at each iteration. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated precision matrix (inverse covariance). + + costs_ : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + .. versionadded:: 1.3 + + alpha_ : float + Penalization parameter selected. + + cv_results_ : dict of ndarrays + A dict with keys: + + alphas : ndarray of shape (n_alphas,) + All penalization parameters explored. + + split(k)_test_score : ndarray of shape (n_alphas,) + Log-likelihood score on left-out data across (k)th fold. + + .. versionadded:: 1.0 + + mean_test_score : ndarray of shape (n_alphas,) + Mean of scores over the folds. + + .. versionadded:: 1.0 + + std_test_score : ndarray of shape (n_alphas,) + Standard deviation of scores over the folds. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + graphical_lasso : L1-penalized covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. 
+ + Notes + ----- + The search for the optimal penalization parameter (`alpha`) is done on an + iteratively refined grid: first the cross-validated scores on a grid are + computed, then a new refined grid is centered around the maximum, and so + on. + + One of the challenges which is faced here is that the solvers can + fail to converge to a well-conditioned estimate. The corresponding + values of `alpha` then come out as missing values, but the optimum may + be close to these missing values. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import GraphicalLassoCV + >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.2, 0.0, 0.3, 0.1], + ... [0.0, 0.0, 0.1, 0.7]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0], + ... cov=true_cov, + ... size=200) + >>> cov = GraphicalLassoCV().fit(X) + >>> np.around(cov.covariance_, decimals=3) + array([[0.816, 0.051, 0.22 , 0.017], + [0.051, 0.364, 0.018, 0.036], + [0.22 , 0.018, 0.322, 0.094], + [0.017, 0.036, 0.094, 0.69 ]]) + >>> np.around(cov.location_, decimals=3) + array([0.073, 0.04 , 0.038, 0.143]) + """ + + _parameter_constraints: dict = { + **BaseGraphicalLasso._parameter_constraints, + "alphas": [Interval(Integral, 0, None, closed="left"), "array-like"], + "n_refinements": [Interval(Integral, 1, None, closed="left")], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + } + + def __init__( + self, + *, + alphas=4, + n_refinements=4, + cv=None, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + mode="cd", + n_jobs=None, + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__( + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + mode=mode, + verbose=verbose, + eps=eps, + assume_centered=assume_centered, + ) + self.alphas = alphas + self.n_refinements = n_refinements + self.cv = cv + self.n_jobs = n_jobs + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the GraphicalLasso covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
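After fitting, the attributes documented above (`alpha_`, `cv_results_`) can be inspected; a brief sketch reusing the docstring's synthetic setup to show the selected penalty and the cross-validation grid.

import numpy as np
from sklearn.covariance import GraphicalLassoCV

true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
                     [0.0, 0.4, 0.0, 0.0],
                     [0.2, 0.0, 0.3, 0.1],
                     [0.0, 0.0, 0.1, 0.7]])
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=np.zeros(4), cov=true_cov, size=200)

cov = GraphicalLassoCV(alphas=4, n_refinements=4).fit(X)

print(cov.alpha_)                              # selected penalty
print(cov.cv_results_["alphas"].shape)         # all alphas explored
print(cov.cv_results_["mean_test_score"][:3])  # mean CV log-likelihood per alpha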
+ """ + # Covariance does not make sense for a single feature + X = self._validate_data(X, ensure_min_features=2) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) + + cv = check_cv(self.cv, y, classifier=False) + + # List of (alpha, scores, covs) + path = list() + n_alphas = self.alphas + inner_verbose = max(0, self.verbose - 1) + + if _is_arraylike_not_scalar(n_alphas): + for alpha in self.alphas: + check_scalar( + alpha, + "alpha", + Real, + min_val=0, + max_val=np.inf, + include_boundaries="right", + ) + alphas = self.alphas + n_refinements = 1 + else: + n_refinements = self.n_refinements + alpha_1 = alpha_max(emp_cov) + alpha_0 = 1e-2 * alpha_1 + alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1] + + t0 = time.time() + for i in range(n_refinements): + with warnings.catch_warnings(): + # No need to see the convergence warnings on this grid: + # they will always be points that will not converge + # during the cross-validation + warnings.simplefilter("ignore", ConvergenceWarning) + # Compute the cross-validated loss on the current grid + + # NOTE: Warm-restarting graphical_lasso_path has been tried, + # and this did not allow to gain anything + # (same execution time with or without). + this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(graphical_lasso_path)( + X[train], + alphas=alphas, + X_test=X[test], + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=int(0.1 * self.max_iter), + verbose=inner_verbose, + eps=self.eps, + ) + for train, test in cv.split(X, y) + ) + + # Little danse to transform the list in what we need + covs, _, scores = zip(*this_path) + covs = zip(*covs) + scores = zip(*scores) + path.extend(zip(alphas, scores, covs)) + path = sorted(path, key=operator.itemgetter(0), reverse=True) + + # Find the maximum (avoid using built in 'max' function to + # have a fully-reproducible selection of the smallest alpha + # in case of equality) + best_score = -np.inf + last_finite_idx = 0 + for index, (alpha, scores, _) in enumerate(path): + this_score = np.mean(scores) + if this_score >= 0.1 / np.finfo(np.float64).eps: + this_score = np.nan + if np.isfinite(this_score): + last_finite_idx = index + if this_score >= best_score: + best_score = this_score + best_index = index + + # Refine the grid + if best_index == 0: + # We do not need to go back: we have chosen + # the highest value of alpha for which there are + # non-zero coefficients + alpha_1 = path[0][0] + alpha_0 = path[1][0] + elif best_index == last_finite_idx and not best_index == len(path) - 1: + # We have non-converged models on the upper bound of the + # grid, we need to refine the grid there + alpha_1 = path[best_index][0] + alpha_0 = path[best_index + 1][0] + elif best_index == len(path) - 1: + alpha_1 = path[best_index][0] + alpha_0 = 0.01 * path[best_index][0] + else: + alpha_1 = path[best_index - 1][0] + alpha_0 = path[best_index + 1][0] + + if not _is_arraylike_not_scalar(n_alphas): + alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2) + alphas = alphas[1:-1] + + if self.verbose and n_refinements > 1: + print( + "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is" + % (i + 1, n_refinements, time.time() - t0) + ) + + path = list(zip(*path)) + grid_scores = list(path[1]) + alphas = list(path[0]) + # Finally, compute the score with alpha = 0 + alphas.append(0) + grid_scores.append( + cross_val_score( 
+ EmpiricalCovariance(), + X, + cv=cv, + n_jobs=self.n_jobs, + verbose=inner_verbose, + ) + ) + grid_scores = np.array(grid_scores) + + self.cv_results_ = {"alphas": np.array(alphas)} + + for i in range(grid_scores.shape[1]): + self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i] + + self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1) + self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1) + + best_alpha = alphas[best_index] + self.alpha_ = best_alpha + + # Finally fit the model with the selected alpha + self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( + emp_cov, + alpha=best_alpha, + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=self.max_iter, + verbose=inner_verbose, + eps=self.eps, + ) + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..c90e855ca67681984a6bc4186ca1cb2e7b9fff59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py @@ -0,0 +1,868 @@ +""" +Robust location and covariance estimators. + +Here are implemented estimators that are resistant to outliers. + +""" +# Author: Virgile Fritsch +# +# License: BSD 3 clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.stats import chi2 + +from ..base import _fit_context +from ..utils import check_array, check_random_state +from ..utils._param_validation import Interval +from ..utils.extmath import fast_logdet +from ._empirical_covariance import EmpiricalCovariance, empirical_covariance + + +# Minimum Covariance Determinant +# Implementing of an algorithm by Rousseeuw & Van Driessen described in +# (A Fast Algorithm for the Minimum Covariance Determinant Estimator, +# 1999, American Statistical Association and the American Society +# for Quality, TECHNOMETRICS) +# XXX Is this really a public function? It's not listed in the docs or +# exported by sklearn.covariance. Deprecate? +def c_step( + X, + n_support, + remaining_iterations=30, + initial_estimates=None, + verbose=False, + cov_computation_method=empirical_covariance, + random_state=None, +): + """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data set in which we look for the n_support observations whose + scatter matrix has minimum determinant. + + n_support : int + Number of observations to compute the robust estimates of location + and covariance from. This parameter must be greater than + `n_samples / 2`. + + remaining_iterations : int, default=30 + Number of iterations to perform. + According to [Rouseeuw1999]_, two iterations are sufficient to get + close to the minimum, and we never need more than 30 to reach + convergence. + + initial_estimates : tuple of shape (2,), default=None + Initial estimates of location and shape from which to run the c_step + procedure: + - initial_estimates[0]: an initial location estimate + - initial_estimates[1]: an initial covariance estimate + + verbose : bool, default=False + Verbose mode. + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return array of shape (n_features, n_features). 
+ + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + location : ndarray of shape (n_features,) + Robust location estimates. + + covariance : ndarray of shape (n_features, n_features) + Robust covariance estimates. + + support : ndarray of shape (n_samples,) + A mask for the `n_support` observations whose scatter matrix has + minimum determinant. + + References + ---------- + .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + """ + X = np.asarray(X) + random_state = check_random_state(random_state) + return _c_step( + X, + n_support, + remaining_iterations=remaining_iterations, + initial_estimates=initial_estimates, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + + +def _c_step( + X, + n_support, + random_state, + remaining_iterations=30, + initial_estimates=None, + verbose=False, + cov_computation_method=empirical_covariance, +): + n_samples, n_features = X.shape + dist = np.inf + + # Initialisation + support = np.zeros(n_samples, dtype=bool) + if initial_estimates is None: + # compute initial robust estimates from a random subset + support[random_state.permutation(n_samples)[:n_support]] = True + else: + # get initial robust estimates from the function parameters + location = initial_estimates[0] + covariance = initial_estimates[1] + # run a special iteration for that case (to get an initial support) + precision = linalg.pinvh(covariance) + X_centered = X - location + dist = (np.dot(X_centered, precision) * X_centered).sum(1) + # compute new estimates + support[np.argsort(dist)[:n_support]] = True + + X_support = X[support] + location = X_support.mean(0) + covariance = cov_computation_method(X_support) + + # Iterative procedure for Minimum Covariance Determinant computation + det = fast_logdet(covariance) + # If the data already has singular covariance, calculate the precision, + # as the loop below will not be entered. 
+ if np.isinf(det): + precision = linalg.pinvh(covariance) + + previous_det = np.inf + while det < previous_det and remaining_iterations > 0 and not np.isinf(det): + # save old estimates values + previous_location = location + previous_covariance = covariance + previous_det = det + previous_support = support + # compute a new support from the full data set mahalanobis distances + precision = linalg.pinvh(covariance) + X_centered = X - location + dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) + # compute new estimates + support = np.zeros(n_samples, dtype=bool) + support[np.argsort(dist)[:n_support]] = True + X_support = X[support] + location = X_support.mean(axis=0) + covariance = cov_computation_method(X_support) + det = fast_logdet(covariance) + # update remaining iterations for early stopping + remaining_iterations -= 1 + + previous_dist = dist + dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1) + # Check if best fit already found (det => 0, logdet => -inf) + if np.isinf(det): + results = location, covariance, det, support, dist + # Check convergence + if np.allclose(det, previous_det): + # c_step procedure converged + if verbose: + print( + "Optimal couple (location, covariance) found before" + " ending iterations (%d left)" % (remaining_iterations) + ) + results = location, covariance, det, support, dist + elif det > previous_det: + # determinant has increased (should not happen) + warnings.warn( + "Determinant has increased; this should not happen: " + "log(det) > log(previous_det) (%.15f > %.15f). " + "You may want to try with a higher value of " + "support_fraction (current value: %.3f)." + % (det, previous_det, n_support / n_samples), + RuntimeWarning, + ) + results = ( + previous_location, + previous_covariance, + previous_det, + previous_support, + previous_dist, + ) + + # Check early stopping + if remaining_iterations == 0: + if verbose: + print("Maximum number of iterations reached") + results = location, covariance, det, support, dist + + return results + + +def select_candidates( + X, + n_support, + n_trials, + select=1, + n_iter=30, + verbose=False, + cov_computation_method=empirical_covariance, + random_state=None, +): + """Finds the best pure subset of observations to compute MCD from it. + + The purpose of this function is to find the best sets of n_support + observations with respect to a minimization of their covariance + matrix determinant. Equivalently, it removes n_samples-n_support + observations to construct what we call a pure data set (i.e. not + containing outliers). The list of the observations of the pure + data set is referred to as the `support`. + + Starting from a random support, the pure data set is found by the + c_step procedure introduced by Rousseeuw and Van Driessen in + [RV]_. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data (sub)set in which we look for the n_support purest observations. + + n_support : int + The number of samples the pure data set must contain. + This parameter must be in the range `[(n + p + 1)/2] < n_support < n`. + + n_trials : int or tuple of shape (2,) + Number of different initial sets of observations from which to + run the algorithm. This parameter should be a strictly positive + integer. + Instead of giving a number of trials to perform, one can provide a + list of initial estimates that will be used to iteratively run + c_step procedures. 
In this case: + - n_trials[0]: array-like, shape (n_trials, n_features) + is the list of `n_trials` initial location estimates + - n_trials[1]: array-like, shape (n_trials, n_features, n_features) + is the list of `n_trials` initial covariances estimates + + select : int, default=1 + Number of best candidates results to return. This parameter must be + a strictly positive integer. + + n_iter : int, default=30 + Maximum number of iterations for the c_step procedure. + (2 is enough to be close to the final solution. "Never" exceeds 20). + This parameter must be a strictly positive integer. + + verbose : bool, default=False + Control the output verbosity. + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return an array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + See Also + --------- + c_step + + Returns + ------- + best_locations : ndarray of shape (select, n_features) + The `select` location estimates computed from the `select` best + supports found in the data set (`X`). + + best_covariances : ndarray of shape (select, n_features, n_features) + The `select` covariance estimates computed from the `select` + best supports found in the data set (`X`). + + best_supports : ndarray of shape (select, n_samples) + The `select` best supports found in the data set (`X`). + + References + ---------- + .. [RV] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + """ + random_state = check_random_state(random_state) + + if isinstance(n_trials, Integral): + run_from_estimates = False + elif isinstance(n_trials, tuple): + run_from_estimates = True + estimates_list = n_trials + n_trials = estimates_list[0].shape[0] + else: + raise TypeError( + "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)" + % (n_trials, type(n_trials)) + ) + + # compute `n_trials` location and shape estimates candidates in the subset + all_estimates = [] + if not run_from_estimates: + # perform `n_trials` computations from random initial supports + for j in range(n_trials): + all_estimates.append( + _c_step( + X, + n_support, + remaining_iterations=n_iter, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + ) + else: + # perform computations from every given initial estimates + for j in range(n_trials): + initial_estimates = (estimates_list[0][j], estimates_list[1][j]) + all_estimates.append( + _c_step( + X, + n_support, + remaining_iterations=n_iter, + initial_estimates=initial_estimates, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + ) + all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip( + *all_estimates + ) + # find the `n_best` best results among the `n_trials` ones + index_best = np.argsort(all_dets_sub)[:select] + best_locations = np.asarray(all_locs_sub)[index_best] + best_covariances = np.asarray(all_covs_sub)[index_best] + best_supports = np.asarray(all_supports_sub)[index_best] + best_ds = np.asarray(all_ds_sub)[index_best] + + return best_locations, best_covariances, best_supports, best_ds + + +def fast_mcd( + X, + 
support_fraction=None, + cov_computation_method=empirical_covariance, + random_state=None, +): + """Estimate the Minimum Covariance Determinant matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. Default is `None`, which implies that the minimum + value of `support_fraction` will be used within the algorithm: + `(n_samples + n_features + 1) / 2 * n_samples`. This parameter must be + in the range (0, 1). + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return an array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + location : ndarray of shape (n_features,) + Robust location of the data. + + covariance : ndarray of shape (n_features, n_features) + Robust covariance of the features. + + support : ndarray of shape (n_samples,), dtype=bool + A mask of the observations that have been used to compute + the robust location and covariance estimates of the data set. + + Notes + ----- + The FastMCD algorithm has been introduced by Rousseuw and Van Driessen + in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, + 1999, American Statistical Association and the American Society + for Quality, TECHNOMETRICS". + The principle is to compute robust estimates and random subsets before + pooling them into a larger subsets, and finally into the full data set. + Depending on the size of the initial sample, we have one, two or three + such computation levels. + + Note that only raw estimates are returned. If one is interested in + the correction and reweighting steps described in [RouseeuwVan]_, + see the MinCovDet object. + + References + ---------- + + .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + + .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, + Asymptotics For The Minimum Covariance Determinant Estimator, + The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 + """ + random_state = check_random_state(random_state) + + X = check_array(X, ensure_min_samples=2, estimator="fast_mcd") + n_samples, n_features = X.shape + + # minimum breakdown value + if support_fraction is None: + n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) + else: + n_support = int(support_fraction * n_samples) + + # 1-dimensional case quick computation + # (Rousseeuw, P. J. and Leroy, A. M. 
(2005) References, in Robust + # Regression and Outlier Detection, John Wiley & Sons, chapter 4) + if n_features == 1: + if n_support < n_samples: + # find the sample shortest halves + X_sorted = np.sort(np.ravel(X)) + diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)] + halves_start = np.where(diff == np.min(diff))[0] + # take the middle points' mean to get the robust location estimate + location = ( + 0.5 + * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() + ) + support = np.zeros(n_samples, dtype=bool) + X_centered = X - location + support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True + covariance = np.asarray([[np.var(X[support])]]) + location = np.array([location]) + # get precision matrix in an optimized way + precision = linalg.pinvh(covariance) + dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) + else: + support = np.ones(n_samples, dtype=bool) + covariance = np.asarray([[np.var(X)]]) + location = np.asarray([np.mean(X)]) + X_centered = X - location + # get precision matrix in an optimized way + precision = linalg.pinvh(covariance) + dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) + # Starting FastMCD algorithm for p-dimensional case + if (n_samples > 500) and (n_features > 1): + # 1. Find candidate supports on subsets + # a. split the set in subsets of size ~ 300 + n_subsets = n_samples // 300 + n_samples_subsets = n_samples // n_subsets + samples_shuffle = random_state.permutation(n_samples) + h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) + # b. perform a total of 500 trials + n_trials_tot = 500 + # c. select 10 best (location, covariance) for each subset + n_best_sub = 10 + n_trials = max(10, n_trials_tot // n_subsets) + n_best_tot = n_subsets * n_best_sub + all_best_locations = np.zeros((n_best_tot, n_features)) + try: + all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) + except MemoryError: + # The above is too big. Let's try with something much small + # (and less optimal) + n_best_tot = 10 + all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) + n_best_sub = 2 + for i in range(n_subsets): + low_bound = i * n_samples_subsets + high_bound = low_bound + n_samples_subsets + current_subset = X[samples_shuffle[low_bound:high_bound]] + best_locations_sub, best_covariances_sub, _, _ = select_candidates( + current_subset, + h_subset, + n_trials, + select=n_best_sub, + n_iter=2, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) + all_best_locations[subset_slice] = best_locations_sub + all_best_covariances[subset_slice] = best_covariances_sub + # 2. Pool the candidate supports into a merged set + # (possibly the full dataset) + n_samples_merged = min(1500, n_samples) + h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) + if n_samples > 1500: + n_best_merged = 10 + else: + n_best_merged = 1 + # find the best couples (location, covariance) on the merged set + selection = random_state.permutation(n_samples)[:n_samples_merged] + locations_merged, covariances_merged, supports_merged, d = select_candidates( + X[selection], + h_merged, + n_trials=(all_best_locations, all_best_covariances), + select=n_best_merged, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + # 3. 
Finally get the overall best (locations, covariance) couple + if n_samples < 1500: + # directly get the best couple (location, covariance) + location = locations_merged[0] + covariance = covariances_merged[0] + support = np.zeros(n_samples, dtype=bool) + dist = np.zeros(n_samples) + support[selection] = supports_merged[0] + dist[selection] = d[0] + else: + # select the best couple on the full dataset + locations_full, covariances_full, supports_full, d = select_candidates( + X, + n_support, + n_trials=(locations_merged, covariances_merged), + select=1, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + location = locations_full[0] + covariance = covariances_full[0] + support = supports_full[0] + dist = d[0] + elif n_features > 1: + # 1. Find the 10 best couples (location, covariance) + # considering two iterations + n_trials = 30 + n_best = 10 + locations_best, covariances_best, _, _ = select_candidates( + X, + n_support, + n_trials=n_trials, + select=n_best, + n_iter=2, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + # 2. Select the best couple on the full dataset amongst the 10 + locations_full, covariances_full, supports_full, d = select_candidates( + X, + n_support, + n_trials=(locations_best, covariances_best), + select=1, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + location = locations_full[0] + covariance = covariances_full[0] + support = supports_full[0] + dist = d[0] + + return location, covariance, support, dist + + +class MinCovDet(EmpiricalCovariance): + """Minimum Covariance Determinant (MCD): robust estimator of covariance. + + The Minimum Covariance Determinant covariance estimator is to be applied + on Gaussian-distributed data, but could still be relevant on data + drawn from a unimodal, symmetric distribution. It is not meant to be used + with multi-modal data (the algorithm used to fit a MinCovDet object is + likely to fail in such a case). + One should consider projection pursuit methods to deal with multi-modal + datasets. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, the support of the robust location and the covariance + estimates is computed, and a covariance estimate is recomputed from + it, without centering the data. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, the robust location and covariance are directly computed + with the FastMCD algorithm without additional treatment. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. Default is None, which implies that the minimum + value of support_fraction will be used within the algorithm: + `(n_samples + n_features + 1) / 2 * n_samples`. The parameter must be + in the range (0, 1]. + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + raw_location_ : ndarray of shape (n_features,) + The raw robust estimated location before correction and re-weighting. + + raw_covariance_ : ndarray of shape (n_features, n_features) + The raw robust estimated covariance before correction and re-weighting. 
+ + raw_support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the raw robust estimates of location and shape, before correction + and re-weighting. + + location_ : ndarray of shape (n_features,) + Estimated robust location. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated robust covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the robust estimates of location and shape. + + dist_ : ndarray of shape (n_samples,) + Mahalanobis distances of the training set (on which :meth:`fit` is + called) observations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + References + ---------- + + .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression. + J. Am Stat Ass, 79:871, 1984. + .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun, + Asymptotics For The Minimum Covariance Determinant Estimator, + The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import MinCovDet + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=500) + >>> cov = MinCovDet(random_state=0).fit(X) + >>> cov.covariance_ + array([[0.7411..., 0.2535...], + [0.2535..., 0.3053...]]) + >>> cov.location_ + array([0.0813... , 0.0427...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "support_fraction": [Interval(Real, 0, 1, closed="right"), None], + "random_state": ["random_state"], + } + _nonrobust_covariance = staticmethod(empirical_covariance) + + def __init__( + self, + *, + store_precision=True, + assume_centered=False, + support_fraction=None, + random_state=None, + ): + self.store_precision = store_precision + self.assume_centered = assume_centered + self.support_fraction = support_fraction + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit a Minimum Covariance Determinant with the FastMCD algorithm. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. 
+ + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet") + random_state = check_random_state(self.random_state) + n_samples, n_features = X.shape + # check that the empirical covariance is full rank + if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: + warnings.warn( + "The covariance matrix associated to your dataset is not full rank" + ) + # compute and store raw estimates + raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( + X, + support_fraction=self.support_fraction, + cov_computation_method=self._nonrobust_covariance, + random_state=random_state, + ) + if self.assume_centered: + raw_location = np.zeros(n_features) + raw_covariance = self._nonrobust_covariance( + X[raw_support], assume_centered=True + ) + # get precision matrix in an optimized way + precision = linalg.pinvh(raw_covariance) + raw_dist = np.sum(np.dot(X, precision) * X, 1) + self.raw_location_ = raw_location + self.raw_covariance_ = raw_covariance + self.raw_support_ = raw_support + self.location_ = raw_location + self.support_ = raw_support + self.dist_ = raw_dist + # obtain consistency at normal models + self.correct_covariance(X) + # re-weight estimator + self.reweight_covariance(X) + + return self + + def correct_covariance(self, data): + """Apply a correction to raw Minimum Covariance Determinant estimates. + + Correction using the empirical correction factor suggested + by Rousseeuw and Van Driessen in [RVD]_. + + Parameters + ---------- + data : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + The data set must be the one which was used to compute + the raw estimates. + + Returns + ------- + covariance_corrected : ndarray of shape (n_features, n_features) + Corrected robust covariance estimate. + + References + ---------- + + .. [RVD] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + """ + + # Check that the covariance of the support data is not equal to 0. + # Otherwise self.dist_ = 0 and thus correction = 0. + n_samples = len(self.dist_) + n_support = np.sum(self.support_) + if n_support < n_samples and np.allclose(self.raw_covariance_, 0): + raise ValueError( + "The covariance matrix of the support data " + "is equal to 0, try to increase support_fraction" + ) + correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) + covariance_corrected = self.raw_covariance_ * correction + self.dist_ /= correction + return covariance_corrected + + def reweight_covariance(self, data): + """Re-weight raw Minimum Covariance Determinant estimates. + + Re-weight observations using Rousseeuw's method (equivalent to + deleting outlying observations from the data set before + computing location and covariance estimates) described + in [RVDriessen]_. + + Parameters + ---------- + data : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + The data set must be the one which was used to compute + the raw estimates. + + Returns + ------- + location_reweighted : ndarray of shape (n_features,) + Re-weighted robust location estimate. + + covariance_reweighted : ndarray of shape (n_features, n_features) + Re-weighted robust covariance estimate. 
+ + support_reweighted : ndarray of shape (n_samples,), dtype=bool + A mask of the observations that have been used to compute + the re-weighted robust location and covariance estimates. + + References + ---------- + + .. [RVDriessen] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + """ + n_samples, n_features = data.shape + mask = self.dist_ < chi2(n_features).isf(0.025) + if self.assume_centered: + location_reweighted = np.zeros(n_features) + else: + location_reweighted = data[mask].mean(0) + covariance_reweighted = self._nonrobust_covariance( + data[mask], assume_centered=self.assume_centered + ) + support_reweighted = np.zeros(n_samples, dtype=bool) + support_reweighted[mask] = True + self._set_covariance(covariance_reweighted) + self.location_ = location_reweighted + self.support_ = support_reweighted + X_centered = data - self.location_ + self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1) + return location_reweighted, covariance_reweighted, support_reweighted diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..2c8248d0f65025b3cd5f1e4e2c969c4b4fa9bf91 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py @@ -0,0 +1,816 @@ +""" +Covariance estimators using shrinkage. + +Shrinkage corresponds to regularising `cov` using a convex combination: +shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate. + +""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Virgile Fritsch +# +# License: BSD 3 clause + +# avoid division truncation +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import _fit_context +from ..utils import check_array +from ..utils._param_validation import Interval, validate_params +from . import EmpiricalCovariance, empirical_covariance + + +def _ledoit_wolf(X, *, assume_centered, block_size): + """Estimate the shrunk Ledoit-Wolf covariance matrix.""" + # for only one feature, the result is the same whatever the shrinkage + if len(X.shape) == 2 and X.shape[1] == 1: + if not assume_centered: + X = X - X.mean() + return np.atleast_2d((X**2).mean()), 0.0 + n_features = X.shape[1] + + # get Ledoit-Wolf shrinkage + shrinkage = ledoit_wolf_shrinkage( + X, assume_centered=assume_centered, block_size=block_size + ) + emp_cov = empirical_covariance(X, assume_centered=assume_centered) + mu = np.sum(np.trace(emp_cov)) / n_features + shrunk_cov = (1.0 - shrinkage) * emp_cov + shrunk_cov.flat[:: n_features + 1] += shrinkage * mu + + return shrunk_cov, shrinkage + + +def _oas(X, *, assume_centered=False): + """Estimate covariance with the Oracle Approximating Shrinkage algorithm. + + The formulation is based on [1]_. + [1] "Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. 
+ https://arxiv.org/pdf/0907.4698.pdf + """ + if len(X.shape) == 2 and X.shape[1] == 1: + # for only one feature, the result is the same whatever the shrinkage + if not assume_centered: + X = X - X.mean() + return np.atleast_2d((X**2).mean()), 0.0 + + n_samples, n_features = X.shape + + emp_cov = empirical_covariance(X, assume_centered=assume_centered) + + # The shrinkage is defined as: + # shrinkage = min( + # trace(S @ S.T) + trace(S)**2) / ((n + 1) (trace(S @ S.T) - trace(S)**2 / p), 1 + # ) + # where n and p are n_samples and n_features, respectively (cf. Eq. 23 in [1]). + # The factor 2 / p is omitted since it does not impact the value of the estimator + # for large p. + + # Instead of computing trace(S)**2, we can compute the average of the squared + # elements of S that is equal to trace(S)**2 / p**2. + # See the definition of the Frobenius norm: + # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm + alpha = np.mean(emp_cov**2) + mu = np.trace(emp_cov) / n_features + mu_squared = mu**2 + + # The factor 1 / p**2 will cancel out since it is in both the numerator and + # denominator + num = alpha + mu_squared + den = (n_samples + 1) * (alpha - mu_squared / n_features) + shrinkage = 1.0 if den == 0 else min(num / den, 1.0) + + # The shrunk covariance is defined as: + # (1 - shrinkage) * S + shrinkage * F (cf. Eq. 4 in [1]) + # where S is the empirical covariance and F is the shrinkage target defined as + # F = trace(S) / n_features * np.identity(n_features) (cf. Eq. 3 in [1]) + shrunk_cov = (1.0 - shrinkage) * emp_cov + shrunk_cov.flat[:: n_features + 1] += shrinkage * mu + + return shrunk_cov, shrinkage + + +############################################################################### +# Public API +# ShrunkCovariance estimator + + +@validate_params( + { + "emp_cov": ["array-like"], + "shrinkage": [Interval(Real, 0, 1, closed="both")], + }, + prefer_skip_nested_validation=True, +) +def shrunk_covariance(emp_cov, shrinkage=0.1): + """Calculate covariance matrices shrunk on the diagonal. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + emp_cov : array-like of shape (..., n_features, n_features) + Covariance matrices to be shrunk, at least 2D ndarray. + + shrinkage : float, default=0.1 + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + Returns + ------- + shrunk_cov : ndarray of shape (..., n_features, n_features) + Shrunk covariance matrices. + + Notes + ----- + The regularized (shrunk) covariance is given by:: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where `mu = trace(cov) / n_features`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_gaussian_quantiles + >>> from sklearn.covariance import empirical_covariance, shrunk_covariance + >>> real_cov = np.array([[.8, .3], [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) + >>> shrunk_covariance(empirical_covariance(X)) + array([[0.73..., 0.25...], + [0.25..., 0.41...]]) + """ + emp_cov = check_array(emp_cov, allow_nd=True) + n_features = emp_cov.shape[-1] + + shrunk_cov = (1.0 - shrinkage) * emp_cov + mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features + mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim))) + shrunk_cov += shrinkage * mu * np.eye(n_features) + + return shrunk_cov + + +class ShrunkCovariance(EmpiricalCovariance): + """Covariance estimator with shrinkage. 
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data will be centered before computation. + + shrinkage : float, default=0.1 + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + + Notes + ----- + The regularized covariance is given by: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import ShrunkCovariance + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=500) + >>> cov = ShrunkCovariance().fit(X) + >>> cov.covariance_ + array([[0.7387..., 0.2536...], + [0.2536..., 0.4110...]]) + >>> cov.location_ + array([0.0622..., 0.0193...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "shrinkage": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1): + super().__init__( + store_precision=store_precision, assume_centered=assume_centered + ) + self.shrinkage = shrinkage + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the shrunk covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. 
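+
+        Examples
+        --------
+        A minimal sketch on synthetic data (the input values are illustrative
+        assumptions, not taken from elsewhere in this module):
+
+        >>> import numpy as np
+        >>> from sklearn.covariance import ShrunkCovariance
+        >>> X = np.random.RandomState(0).randn(100, 3)
+        >>> ShrunkCovariance(shrinkage=0.2).fit(X).covariance_.shape
+        (3, 3)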
+ """ + X = self._validate_data(X) + # Not calling the parent object to fit, to avoid a potential + # matrix inversion when setting the precision + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance = empirical_covariance(X, assume_centered=self.assume_centered) + covariance = shrunk_covariance(covariance, self.shrinkage) + self._set_covariance(covariance) + + return self + + +# Ledoit-Wolf estimator + + +@validate_params( + { + "X": ["array-like"], + "assume_centered": ["boolean"], + "block_size": [Interval(Integral, 1, None, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000): + """Estimate the shrunk Ledoit-Wolf covariance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split. + + Returns + ------- + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. + + Notes + ----- + The regularized (shrunk) covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import ledoit_wolf_shrinkage + >>> real_cov = np.array([[.4, .2], [.2, .8]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) + >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X) + >>> shrinkage_coefficient + 0.23... + """ + X = check_array(X) + # for only one feature, the result is the same whatever the shrinkage + if len(X.shape) == 2 and X.shape[1] == 1: + return 0.0 + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + + if X.shape[0] == 1: + warnings.warn( + "Only one sample available. 
You may want to reshape your data array" + ) + n_samples, n_features = X.shape + + # optionally center data + if not assume_centered: + X = X - X.mean(0) + + # A non-blocked version of the computation is present in the tests + # in tests/test_covariance.py + + # number of blocks to split the covariance matrix into + n_splits = int(n_features / block_size) + X2 = X**2 + emp_cov_trace = np.sum(X2, axis=0) / n_samples + mu = np.sum(emp_cov_trace) / n_features + beta_ = 0.0 # sum of the coefficients of + delta_ = 0.0 # sum of the *squared* coefficients of + # starting block computation + for i in range(n_splits): + for j in range(n_splits): + rows = slice(block_size * i, block_size * (i + 1)) + cols = slice(block_size * j, block_size * (j + 1)) + beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols])) + delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2) + rows = slice(block_size * i, block_size * (i + 1)) + beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :])) + delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2) + for j in range(n_splits): + cols = slice(block_size * j, block_size * (j + 1)) + beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols])) + delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2) + delta_ += np.sum( + np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2 + ) + delta_ /= n_samples**2 + beta_ += np.sum( + np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :]) + ) + # use delta_ to compute beta + beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_) + # delta is the sum of the squared coefficients of ( - mu*Id) / p + delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2 + delta /= n_features + # get final beta as the min between beta and delta + # We do this to prevent shrinking more than "1", which would invert + # the value of covariances + beta = min(beta, delta) + # finally get shrinkage + shrinkage = 0 if beta == 0 else beta / delta + return shrinkage + + +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def ledoit_wolf(X, *, assume_centered=False, block_size=1000): + """Estimate the shrunk Ledoit-Wolf covariance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split. + This is purely a memory optimization and does not affect results. + + Returns + ------- + shrunk_cov : ndarray of shape (n_features, n_features) + Shrunk covariance. + + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. 
+ + Notes + ----- + The regularized (shrunk) covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import empirical_covariance, ledoit_wolf + >>> real_cov = np.array([[.4, .2], [.2, .8]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) + >>> covariance, shrinkage = ledoit_wolf(X) + >>> covariance + array([[0.44..., 0.16...], + [0.16..., 0.80...]]) + >>> shrinkage + 0.23... + """ + estimator = LedoitWolf( + assume_centered=assume_centered, + block_size=block_size, + store_precision=False, + ).fit(X) + + return estimator.covariance_, estimator.shrinkage_ + + +class LedoitWolf(EmpiricalCovariance): + """LedoitWolf Estimator. + + Ledoit-Wolf is a particular form of shrinkage, where the shrinkage + coefficient is computed using O. Ledoit and M. Wolf's formula as + described in "A Well-Conditioned Estimator for Large-Dimensional + Covariance Matrices", Ledoit and Wolf, Journal of Multivariate + Analysis, Volume 88, Issue 2, February 2004, pages 365-411. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split + during its Ledoit-Wolf estimation. This is purely a memory + optimization and does not affect results. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + shrinkage_ : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + and shrinkage is given by the Ledoit and Wolf formula (see References) + + References + ---------- + "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices", + Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2, + February 2004, pages 365-411. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import LedoitWolf + >>> real_cov = np.array([[.4, .2], + ... [.2, .8]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=50) + >>> cov = LedoitWolf().fit(X) + >>> cov.covariance_ + array([[0.4406..., 0.1616...], + [0.1616..., 0.8022...]]) + >>> cov.location_ + array([ 0.0595... , -0.0075...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "block_size": [Interval(Integral, 1, None, closed="left")], + } + + def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000): + super().__init__( + store_precision=store_precision, assume_centered=assume_centered + ) + self.block_size = block_size + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the Ledoit-Wolf shrunk covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Not calling the parent object to fit, to avoid computing the + # covariance matrix (and potentially the precision) + X = self._validate_data(X) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance, shrinkage = _ledoit_wolf( + X - self.location_, assume_centered=True, block_size=self.block_size + ) + self.shrinkage_ = shrinkage + self._set_covariance(covariance) + + return self + + +# OAS estimator +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def oas(X, *, assume_centered=False): + """Estimate covariance with the Oracle Approximating Shrinkage as proposed in [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + Returns + ------- + shrunk_cov : array-like of shape (n_features, n_features) + Shrunk covariance. + + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features), + + where mu = trace(cov) / n_features and shrinkage is given by the OAS formula + (see [1]_). + + The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In + the original article, formula (23) states that 2/p (p being the number of + features) is multiplied by Trace(cov*cov) in both the numerator and + denominator, but this operation is omitted because for a large p, the value + of 2/p is so small that it doesn't affect the value of the estimator. + + References + ---------- + .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. 
+ <0907.4698>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import oas + >>> rng = np.random.RandomState(0) + >>> real_cov = [[.8, .3], [.3, .4]] + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) + >>> shrunk_cov, shrinkage = oas(X) + >>> shrunk_cov + array([[0.7533..., 0.2763...], + [0.2763..., 0.3964...]]) + >>> shrinkage + 0.0195... + """ + estimator = OAS( + assume_centered=assume_centered, + ).fit(X) + return estimator.covariance_, estimator.shrinkage_ + + +class OAS(EmpiricalCovariance): + """Oracle Approximating Shrinkage Estimator as proposed in [1]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data will be centered before computation. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + shrinkage_ : float + coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features), + + where mu = trace(cov) / n_features and shrinkage is given by the OAS formula + (see [1]_). + + The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In + the original article, formula (23) states that 2/p (p being the number of + features) is multiplied by Trace(cov*cov) in both the numerator and + denominator, but this operation is omitted because for a large p, the value + of 2/p is so small that it doesn't affect the value of the estimator. + + References + ---------- + .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. + <0907.4698>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import OAS + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... 
size=500) + >>> oas = OAS().fit(X) + >>> oas.covariance_ + array([[0.7533..., 0.2763...], + [0.2763..., 0.3964...]]) + >>> oas.precision_ + array([[ 1.7833..., -1.2431... ], + [-1.2431..., 3.3889...]]) + >>> oas.shrinkage_ + 0.0195... + """ + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the Oracle Approximating Shrinkage covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = self._validate_data(X) + # Not calling the parent object to fit, to avoid computing the + # covariance matrix (and potentially the precision) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + + covariance, shrinkage = _oas(X - self.location_, assume_centered=True) + self.shrinkage_ = shrinkage + self._set_covariance(covariance) + + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0effaf5b05fa0cb20c05f807e57bcc51f7924de1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__init__.py @@ -0,0 +1,7 @@ +""" +The :mod:`sklearn.experimental` module provides importable modules that enable +the use of experimental features or estimators. + +The features and estimators that are experimental aren't subject to +deprecation cycles. Use them at your own risks! +""" diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b837c983b5c305b2a345f932386394a2ac0d8ede Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_halving_search_cv.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_halving_search_cv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c744f8756933859e491411c1f81b2b58af881414 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_halving_search_cv.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90bc7154491609f131d00c3e948aeea630592419 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_iterative_imputer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_iterative_imputer.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..48478c00df9ea89486fc77255ee6a56e04dbea7e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_iterative_imputer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_halving_search_cv.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_halving_search_cv.py new file mode 100644 index 0000000000000000000000000000000000000000..dd399ef35b6f7fae4b579beefcf0ee52692d8dc8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_halving_search_cv.py @@ -0,0 +1,32 @@ +"""Enables Successive Halving search-estimators + +The API and results of these estimators might change without any deprecation +cycle. + +Importing this file dynamically sets the +:class:`~sklearn.model_selection.HalvingRandomSearchCV` and +:class:`~sklearn.model_selection.HalvingGridSearchCV` as attributes of the +`model_selection` module:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_halving_search_cv # noqa + >>> # now you can import normally from model_selection + >>> from sklearn.model_selection import HalvingRandomSearchCV + >>> from sklearn.model_selection import HalvingGridSearchCV + + +The ``# noqa`` comment comment can be removed: it just tells linters like +flake8 to ignore the import, which appears as unused. +""" + +from .. import model_selection +from ..model_selection._search_successive_halving import ( + HalvingGridSearchCV, + HalvingRandomSearchCV, +) + +# use settattr to avoid mypy errors when monkeypatching +setattr(model_selection, "HalvingRandomSearchCV", HalvingRandomSearchCV) +setattr(model_selection, "HalvingGridSearchCV", HalvingGridSearchCV) + +model_selection.__all__ += ["HalvingRandomSearchCV", "HalvingGridSearchCV"] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_hist_gradient_boosting.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_hist_gradient_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..d287400c7999f4ef83ae779b56c1c32b16446851 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_hist_gradient_boosting.py @@ -0,0 +1,20 @@ +"""This is now a no-op and can be safely removed from your code. + +It used to enable the use of +:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and +:class:`~sklearn.ensemble.HistGradientBoostingRegressor` when they were still +:term:`experimental`, but these estimators are now stable and can be imported +normally from `sklearn.ensemble`. +""" +# Don't remove this file, we don't want to break users code just because the +# feature isn't experimental anymore. + + +import warnings + +warnings.warn( + "Since version 1.0, " + "it is not needed to import enable_hist_gradient_boosting anymore. " + "HistGradientBoostingClassifier and HistGradientBoostingRegressor are now " + "stable and can be normally imported from sklearn.ensemble." 
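+    # No ``category`` argument is given, so the default ``UserWarning`` is
+    # emitted; the accompanying test module relies on that category.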
+) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_iterative_imputer.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_iterative_imputer.py new file mode 100644 index 0000000000000000000000000000000000000000..0b906961ca184ee87e8dc6ded76ca188ee20138f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_iterative_imputer.py @@ -0,0 +1,20 @@ +"""Enables IterativeImputer + +The API and results of this estimator might change without any deprecation +cycle. + +Importing this file dynamically sets :class:`~sklearn.impute.IterativeImputer` +as an attribute of the impute module:: + + >>> # explicitly require this experimental feature + >>> from sklearn.experimental import enable_iterative_imputer # noqa + >>> # now you can import normally from impute + >>> from sklearn.impute import IterativeImputer +""" + +from .. import impute +from ..impute._iterative import IterativeImputer + +# use settattr to avoid mypy errors when monkeypatching +setattr(impute, "IterativeImputer", IterativeImputer) +impute.__all__ += ["IterativeImputer"] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abb2a9d190663e092ff0cda057d36c53db5bf692 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_hist_gradient_boosting.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_hist_gradient_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afcc5ca0085e6e17bfc876e82d7b619b298458a6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_hist_gradient_boosting.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_iterative_imputer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_iterative_imputer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4dbcdfe4c8b7eb1fe75cf09a46a131c2b7a746e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_iterative_imputer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_successive_halving.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_successive_halving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a64c9c525887eb91d94d5181a21541b1aae026ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_successive_halving.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..0a90d63fcb37cffa20c6b919b55b2db59d67c31b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py @@ -0,0 +1,19 @@ +"""Tests for making sure experimental imports work as expected.""" + +import textwrap + +import pytest + +from sklearn.utils import _IS_WASM +from sklearn.utils._testing import assert_run_python_script_without_output + + +@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess") +def test_import_raises_warning(): + code = """ + import pytest + with pytest.warns(UserWarning, match="it is not needed to import"): + from sklearn.experimental import enable_hist_gradient_boosting # noqa + """ + pattern = "it is not needed to import enable_hist_gradient_boosting anymore" + assert_run_python_script_without_output(textwrap.dedent(code), pattern=pattern) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_iterative_imputer.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_iterative_imputer.py new file mode 100644 index 0000000000000000000000000000000000000000..617d921eb8f88e66cd6e7e6d05507ba062ca2e41 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_iterative_imputer.py @@ -0,0 +1,51 @@ +"""Tests for making sure experimental imports work as expected.""" + +import textwrap + +import pytest + +from sklearn.utils import _IS_WASM +from sklearn.utils._testing import assert_run_python_script_without_output + + +@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess") +def test_imports_strategies(): + # Make sure different import strategies work or fail as expected. + + # Since Python caches the imported modules, we need to run a child process + # for every test case. Else, the tests would not be independent + # (manually removing the imports from the cache (sys.modules) is not + # recommended and can lead to many complications). 
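+    # "IterativeImputer is experimental" is a fragment of the ImportError
+    # message raised when the import is attempted without the enable flag;
+    # it is reused as the ``match=`` regex of the pytest.raises checks and as
+    # the ``pattern=`` argument of the subprocess helper below.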
+ pattern = "IterativeImputer is experimental" + good_import = """ + from sklearn.experimental import enable_iterative_imputer + from sklearn.impute import IterativeImputer + """ + assert_run_python_script_without_output( + textwrap.dedent(good_import), pattern=pattern + ) + + good_import_with_ensemble_first = """ + import sklearn.ensemble + from sklearn.experimental import enable_iterative_imputer + from sklearn.impute import IterativeImputer + """ + assert_run_python_script_without_output( + textwrap.dedent(good_import_with_ensemble_first), + pattern=pattern, + ) + + bad_imports = f""" + import pytest + + with pytest.raises(ImportError, match={pattern!r}): + from sklearn.impute import IterativeImputer + + import sklearn.experimental + with pytest.raises(ImportError, match={pattern!r}): + from sklearn.impute import IterativeImputer + """ + assert_run_python_script_without_output( + textwrap.dedent(bad_imports), + pattern=pattern, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_successive_halving.py b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_successive_halving.py new file mode 100644 index 0000000000000000000000000000000000000000..0abbf07eced00d1709b0b86c3e9c66dca01374af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_successive_halving.py @@ -0,0 +1,53 @@ +"""Tests for making sure experimental imports work as expected.""" + +import textwrap + +import pytest + +from sklearn.utils import _IS_WASM +from sklearn.utils._testing import assert_run_python_script_without_output + + +@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess") +def test_imports_strategies(): + # Make sure different import strategies work or fail as expected. + + # Since Python caches the imported modules, we need to run a child process + # for every test case. Else, the tests would not be independent + # (manually removing the imports from the cache (sys.modules) is not + # recommended and can lead to many complications). 
+ pattern = "Halving(Grid|Random)SearchCV is experimental" + good_import = """ + from sklearn.experimental import enable_halving_search_cv + from sklearn.model_selection import HalvingGridSearchCV + from sklearn.model_selection import HalvingRandomSearchCV + """ + assert_run_python_script_without_output( + textwrap.dedent(good_import), pattern=pattern + ) + + good_import_with_model_selection_first = """ + import sklearn.model_selection + from sklearn.experimental import enable_halving_search_cv + from sklearn.model_selection import HalvingGridSearchCV + from sklearn.model_selection import HalvingRandomSearchCV + """ + assert_run_python_script_without_output( + textwrap.dedent(good_import_with_model_selection_first), + pattern=pattern, + ) + + bad_imports = f""" + import pytest + + with pytest.raises(ImportError, match={pattern!r}): + from sklearn.model_selection import HalvingGridSearchCV + + import sklearn.experimental + with pytest.raises(ImportError, match={pattern!r}): + from sklearn.model_selection import HalvingRandomSearchCV + """ + assert_run_python_script_without_output( + textwrap.dedent(bad_imports), + pattern=pattern, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce697656b4c2e3b1a181d845d898b6447dbdaa72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__init__.py @@ -0,0 +1,42 @@ +""" +The :mod:`sklearn.neighbors` module implements the k-nearest neighbors +algorithm. +""" + +from ._ball_tree import BallTree +from ._base import VALID_METRICS, VALID_METRICS_SPARSE, sort_graph_by_row_values +from ._classification import KNeighborsClassifier, RadiusNeighborsClassifier +from ._graph import ( + KNeighborsTransformer, + RadiusNeighborsTransformer, + kneighbors_graph, + radius_neighbors_graph, +) +from ._kd_tree import KDTree +from ._kde import KernelDensity +from ._lof import LocalOutlierFactor +from ._nca import NeighborhoodComponentsAnalysis +from ._nearest_centroid import NearestCentroid +from ._regression import KNeighborsRegressor, RadiusNeighborsRegressor +from ._unsupervised import NearestNeighbors + +__all__ = [ + "BallTree", + "KDTree", + "KNeighborsClassifier", + "KNeighborsRegressor", + "KNeighborsTransformer", + "NearestCentroid", + "NearestNeighbors", + "RadiusNeighborsClassifier", + "RadiusNeighborsRegressor", + "RadiusNeighborsTransformer", + "kneighbors_graph", + "radius_neighbors_graph", + "KernelDensity", + "LocalOutlierFactor", + "NeighborhoodComponentsAnalysis", + "sort_graph_by_row_values", + "VALID_METRICS", + "VALID_METRICS_SPARSE", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d25d4113862b88274f2a5a4dde91718930ba192b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7a80b38a5359a2de68c5ed9ba674c812695d0d02 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_base.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6df0f2030877e3b548decb2dc8cd5a4ef6ef31c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_base.py @@ -0,0 +1,1387 @@ +"""Base and mixin classes for nearest neighbors.""" +# Authors: Jake Vanderplas +# Fabian Pedregosa +# Alexandre Gramfort +# Sparseness support by Lars Buitinck +# Multi-output support by Arnaud Joly +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam +import itertools +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy.sparse import csr_matrix, issparse + +from ..base import BaseEstimator, MultiOutputMixin, is_classifier +from ..exceptions import DataConversionWarning, EfficiencyWarning +from ..metrics import DistanceMetric, pairwise_distances_chunked +from ..metrics._pairwise_distances_reduction import ( + ArgKmin, + RadiusNeighbors, +) +from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS +from ..utils import ( + _to_object_array, + check_array, + gen_even_slices, +) +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.fixes import parse_version, sp_base_version +from ..utils.multiclass import check_classification_targets +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted, check_non_negative +from ._ball_tree import BallTree +from ._kd_tree import KDTree + +SCIPY_METRICS = [ + "braycurtis", + "canberra", + "chebyshev", + "correlation", + "cosine", + "dice", + "hamming", + "jaccard", + "mahalanobis", + "minkowski", + "rogerstanimoto", + "russellrao", + "seuclidean", + "sokalmichener", + "sokalsneath", + "sqeuclidean", + "yule", +] +if sp_base_version < parse_version("1.11"): + # Deprecated in SciPy 1.9 and removed in SciPy 1.11 + SCIPY_METRICS += ["kulsinski"] +if sp_base_version < parse_version("1.9"): + # Deprecated in SciPy 1.0 and removed in SciPy 1.9 + SCIPY_METRICS += ["matching"] + +VALID_METRICS = dict( + ball_tree=BallTree.valid_metrics, + kd_tree=KDTree.valid_metrics, + # The following list comes from the + # sklearn.metrics.pairwise doc string + brute=sorted(set(PAIRWISE_DISTANCE_FUNCTIONS).union(SCIPY_METRICS)), +) + +VALID_METRICS_SPARSE = dict( + ball_tree=[], + kd_tree=[], + brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {"haversine", "nan_euclidean"}), +) + + +def _get_weights(dist, weights): + """Get the weights from an array of distances and a parameter ``weights``. + + Assume weights have already been validated. + + Parameters + ---------- + dist : ndarray + The input distances. + + weights : {'uniform', 'distance'}, callable or None + The kind of weighting used. + + Returns + ------- + weights_arr : array of the same shape as ``dist`` + If ``weights == 'uniform'``, then returns None. 
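(Illustrative aside: the `VALID_METRICS` mapping built above is re-exported from `sklearn.neighbors`, so it can be queried directly to see which metrics each tree backend accepts; a small sketch.)
>>> from sklearn.neighbors import VALID_METRICS
>>> "euclidean" in VALID_METRICS["kd_tree"]
True
>>> "haversine" in VALID_METRICS["ball_tree"]
True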
+ """ + if weights in (None, "uniform"): + return None + + if weights == "distance": + # if user attempts to classify a point that was zero distance from one + # or more training points, those training points are weighted as 1.0 + # and the other points as 0.0 + if dist.dtype is np.dtype(object): + for point_dist_i, point_dist in enumerate(dist): + # check if point_dist is iterable + # (ex: RadiusNeighborClassifier.predict may set an element of + # dist to 1e-6 to represent an 'outlier') + if hasattr(point_dist, "__contains__") and 0.0 in point_dist: + dist[point_dist_i] = point_dist == 0.0 + else: + dist[point_dist_i] = 1.0 / point_dist + else: + with np.errstate(divide="ignore"): + dist = 1.0 / dist + inf_mask = np.isinf(dist) + inf_row = np.any(inf_mask, axis=1) + dist[inf_row] = inf_mask[inf_row] + return dist + + if callable(weights): + return weights(dist) + + +def _is_sorted_by_data(graph): + """Return whether the graph's non-zero entries are sorted by data. + + The non-zero entries are stored in graph.data and graph.indices. + For each row (or sample), the non-zero entries can be either: + - sorted by indices, as after graph.sort_indices(); + - sorted by data, as after _check_precomputed(graph); + - not sorted. + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Neighbors graph as given by `kneighbors_graph` or + `radius_neighbors_graph`. Matrix should be of format CSR format. + + Returns + ------- + res : bool + Whether input graph is sorted by data. + """ + assert graph.format == "csr" + out_of_order = graph.data[:-1] > graph.data[1:] + line_change = np.unique(graph.indptr[1:-1] - 1) + line_change = line_change[line_change < out_of_order.shape[0]] + return out_of_order.sum() == out_of_order[line_change].sum() + + +def _check_precomputed(X): + """Check precomputed distance matrix. + + If the precomputed distance matrix is sparse, it checks that the non-zero + entries are sorted by distances. If not, the matrix is copied and sorted. + + Parameters + ---------- + X : {sparse matrix, array-like}, (n_samples, n_samples) + Distance matrix to other samples. X may be a sparse matrix, in which + case only non-zero elements may be considered neighbors. + + Returns + ------- + X : {sparse matrix, array-like}, (n_samples, n_samples) + Distance matrix to other samples. X may be a sparse matrix, in which + case only non-zero elements may be considered neighbors. + """ + if not issparse(X): + X = check_array(X) + check_non_negative(X, whom="precomputed distance matrix.") + return X + else: + graph = X + + if graph.format not in ("csr", "csc", "coo", "lil"): + raise TypeError( + "Sparse matrix in {!r} format is not supported due to " + "its handling of explicit zeros".format(graph.format) + ) + copied = graph.format != "csr" + graph = check_array(graph, accept_sparse="csr") + check_non_negative(graph, whom="precomputed distance matrix.") + graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True) + + return graph + + +@validate_params( + { + "graph": ["sparse matrix"], + "copy": ["boolean"], + "warn_when_not_sorted": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def sort_graph_by_row_values(graph, copy=False, warn_when_not_sorted=True): + """Sort a sparse graph such that each row is stored with increasing values. + + .. versionadded:: 1.2 + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Distance matrix to other samples, where only non-zero elements are + considered neighbors. 
Matrix is converted to CSR format if not already. + + copy : bool, default=False + If True, the graph is copied before sorting. If False, the sorting is + performed inplace. If the graph is not of CSR format, `copy` must be + True to allow the conversion to CSR format, otherwise an error is + raised. + + warn_when_not_sorted : bool, default=True + If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised + when the input graph is not sorted by row values. + + Returns + ------- + graph : sparse matrix of shape (n_samples, n_samples) + Distance matrix to other samples, where only non-zero elements are + considered neighbors. Matrix is in CSR format. + + Examples + -------- + >>> from scipy.sparse import csr_matrix + >>> from sklearn.neighbors import sort_graph_by_row_values + >>> X = csr_matrix( + ... [[0., 3., 1.], + ... [3., 0., 2.], + ... [1., 2., 0.]]) + >>> X.data + array([3., 1., 3., 2., 1., 2.]) + >>> X_ = sort_graph_by_row_values(X) + >>> X_.data + array([1., 3., 2., 3., 1., 2.]) + """ + if graph.format == "csr" and _is_sorted_by_data(graph): + return graph + + if warn_when_not_sorted: + warnings.warn( + ( + "Precomputed sparse input was not sorted by row values. Use the" + " function sklearn.neighbors.sort_graph_by_row_values to sort the input" + " by row values, with warn_when_not_sorted=False to remove this" + " warning." + ), + EfficiencyWarning, + ) + + if graph.format not in ("csr", "csc", "coo", "lil"): + raise TypeError( + f"Sparse matrix in {graph.format!r} format is not supported due to " + "its handling of explicit zeros" + ) + elif graph.format != "csr": + if not copy: + raise ValueError( + "The input graph is not in CSR format. Use copy=True to allow " + "the conversion to CSR format." + ) + graph = graph.asformat("csr") + elif copy: # csr format with copy=True + graph = graph.copy() + + row_nnz = np.diff(graph.indptr) + if row_nnz.max() == row_nnz.min(): + # if each sample has the same number of provided neighbors + n_samples = graph.shape[0] + distances = graph.data.reshape(n_samples, -1) + + order = np.argsort(distances, kind="mergesort") + order += np.arange(n_samples)[:, None] * row_nnz[0] + order = order.ravel() + graph.data = graph.data[order] + graph.indices = graph.indices[order] + + else: + for start, stop in zip(graph.indptr, graph.indptr[1:]): + order = np.argsort(graph.data[start:stop], kind="mergesort") + graph.data[start:stop] = graph.data[start:stop][order] + graph.indices[start:stop] = graph.indices[start:stop][order] + + return graph + + +def _kneighbors_from_graph(graph, n_neighbors, return_distance): + """Decompose a nearest neighbors sparse graph into distances and indices. + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Neighbors graph as given by `kneighbors_graph` or + `radius_neighbors_graph`. Matrix should be of format CSR format. + + n_neighbors : int + Number of neighbors required for each sample. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + neigh_dist : ndarray of shape (n_samples, n_neighbors) + Distances to nearest neighbors. Only present if `return_distance=True`. + + neigh_ind : ndarray of shape (n_samples, n_neighbors) + Indices of nearest neighbors. 
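(Illustrative aside connecting `sort_graph_by_row_values` and `_kneighbors_from_graph` to the public API; this is a usage sketch only, with exact outputs omitted.)
>>> from scipy.sparse import csr_matrix
>>> from sklearn.neighbors import NearestNeighbors, sort_graph_by_row_values
>>> G = csr_matrix([[0., 3., 1.],
...                 [3., 0., 2.],
...                 [1., 2., 0.]])
>>> G = sort_graph_by_row_values(G, warn_when_not_sorted=False)
>>> nn = NearestNeighbors(n_neighbors=1, metric="precomputed").fit(G)
>>> ind = nn.kneighbors(G, return_distance=False)  # closest stored neighbor index per row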
+ """ + n_samples = graph.shape[0] + assert graph.format == "csr" + + # number of neighbors by samples + row_nnz = np.diff(graph.indptr) + row_nnz_min = row_nnz.min() + if n_neighbors is not None and row_nnz_min < n_neighbors: + raise ValueError( + "%d neighbors per samples are required, but some samples have only" + " %d neighbors in precomputed graph matrix. Decrease number of " + "neighbors used or recompute the graph with more neighbors." + % (n_neighbors, row_nnz_min) + ) + + def extract(a): + # if each sample has the same number of provided neighbors + if row_nnz.max() == row_nnz_min: + return a.reshape(n_samples, -1)[:, :n_neighbors] + else: + idx = np.tile(np.arange(n_neighbors), (n_samples, 1)) + idx += graph.indptr[:-1, None] + return a.take(idx, mode="clip").reshape(n_samples, n_neighbors) + + if return_distance: + return extract(graph.data), extract(graph.indices) + else: + return extract(graph.indices) + + +def _radius_neighbors_from_graph(graph, radius, return_distance): + """Decompose a nearest neighbors sparse graph into distances and indices. + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Neighbors graph as given by `kneighbors_graph` or + `radius_neighbors_graph`. Matrix should be of format CSR format. + + radius : float + Radius of neighborhoods which should be strictly positive. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + neigh_dist : ndarray of shape (n_samples,) of arrays + Distances to nearest neighbors. Only present if `return_distance=True`. + + neigh_ind : ndarray of shape (n_samples,) of arrays + Indices of nearest neighbors. + """ + assert graph.format == "csr" + + no_filter_needed = bool(graph.data.max() <= radius) + + if no_filter_needed: + data, indices, indptr = graph.data, graph.indices, graph.indptr + else: + mask = graph.data <= radius + if return_distance: + data = np.compress(mask, graph.data) + indices = np.compress(mask, graph.indices) + indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr] + + indices = indices.astype(np.intp, copy=no_filter_needed) + + if return_distance: + neigh_dist = _to_object_array(np.split(data, indptr[1:-1])) + neigh_ind = _to_object_array(np.split(indices, indptr[1:-1])) + + if return_distance: + return neigh_dist, neigh_ind + else: + return neigh_ind + + +class NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for nearest neighbors estimators.""" + + _parameter_constraints: dict = { + "n_neighbors": [Interval(Integral, 1, None, closed="left"), None], + "radius": [Interval(Real, 0, None, closed="both"), None], + "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "metric_params": [dict, None], + "n_jobs": [Integral, None], + } + + @abstractmethod + def __init__( + self, + n_neighbors=None, + radius=None, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + self.n_neighbors = n_neighbors + self.radius = radius + self.algorithm = algorithm + self.leaf_size = leaf_size + self.metric = metric + self.metric_params = metric_params + self.p = p + self.n_jobs = n_jobs + + def _check_algorithm_metric(self): + if self.algorithm == "auto": + if self.metric == "precomputed": + alg_check = "brute" + elif ( + callable(self.metric) + or 
self.metric in VALID_METRICS["ball_tree"] + or isinstance(self.metric, DistanceMetric) + ): + alg_check = "ball_tree" + else: + alg_check = "brute" + else: + alg_check = self.algorithm + + if callable(self.metric): + if self.algorithm == "kd_tree": + # callable metric is only valid for brute force and ball_tree + raise ValueError( + "kd_tree does not support callable metric '%s'" + "Function call overhead will result" + "in very poor performance." + % self.metric + ) + elif self.metric not in VALID_METRICS[alg_check] and not isinstance( + self.metric, DistanceMetric + ): + raise ValueError( + "Metric '%s' not valid. Use " + "sorted(sklearn.neighbors.VALID_METRICS['%s']) " + "to get valid options. " + "Metric can also be a callable function." % (self.metric, alg_check) + ) + + if self.metric_params is not None and "p" in self.metric_params: + if self.p is not None: + warnings.warn( + ( + "Parameter p is found in metric_params. " + "The corresponding parameter from __init__ " + "is ignored." + ), + SyntaxWarning, + stacklevel=3, + ) + + def _fit(self, X, y=None): + if self._get_tags()["requires_y"]: + if not isinstance(X, (KDTree, BallTree, NeighborsBase)): + X, y = self._validate_data( + X, y, accept_sparse="csr", multi_output=True, order="C" + ) + + if is_classifier(self): + # Classification targets require a specific format + if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1: + if y.ndim != 1: + warnings.warn( + ( + "A column-vector y was passed when a " + "1d array was expected. Please change " + "the shape of y to (n_samples,), for " + "example using ravel()." + ), + DataConversionWarning, + stacklevel=2, + ) + + self.outputs_2d_ = False + y = y.reshape((-1, 1)) + else: + self.outputs_2d_ = True + + check_classification_targets(y) + self.classes_ = [] + # Using `dtype=np.intp` is necessary since `np.bincount` + # (called in _classification.py) fails when dealing + # with a float64 array on 32bit systems. + self._y = np.empty(y.shape, dtype=np.intp) + for k in range(self._y.shape[1]): + classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True) + self.classes_.append(classes) + + if not self.outputs_2d_: + self.classes_ = self.classes_[0] + self._y = self._y.ravel() + else: + self._y = y + + else: + if not isinstance(X, (KDTree, BallTree, NeighborsBase)): + X = self._validate_data(X, accept_sparse="csr", order="C") + + self._check_algorithm_metric() + if self.metric_params is None: + self.effective_metric_params_ = {} + else: + self.effective_metric_params_ = self.metric_params.copy() + + effective_p = self.effective_metric_params_.get("p", self.p) + if self.metric == "minkowski": + self.effective_metric_params_["p"] = effective_p + + self.effective_metric_ = self.metric + # For minkowski distance, use more efficient methods where available + if self.metric == "minkowski": + p = self.effective_metric_params_.pop("p", 2) + w = self.effective_metric_params_.pop("w", None) + + if p == 1 and w is None: + self.effective_metric_ = "manhattan" + elif p == 2 and w is None: + self.effective_metric_ = "euclidean" + elif p == np.inf and w is None: + self.effective_metric_ = "chebyshev" + else: + # Use the generic minkowski metric, possibly weighted. 
+ self.effective_metric_params_["p"] = p + self.effective_metric_params_["w"] = w + + if isinstance(X, NeighborsBase): + self._fit_X = X._fit_X + self._tree = X._tree + self._fit_method = X._fit_method + self.n_samples_fit_ = X.n_samples_fit_ + return self + + elif isinstance(X, BallTree): + self._fit_X = X.data + self._tree = X + self._fit_method = "ball_tree" + self.n_samples_fit_ = X.data.shape[0] + return self + + elif isinstance(X, KDTree): + self._fit_X = X.data + self._tree = X + self._fit_method = "kd_tree" + self.n_samples_fit_ = X.data.shape[0] + return self + + if self.metric == "precomputed": + X = _check_precomputed(X) + # Precomputed matrix X must be squared + if X.shape[0] != X.shape[1]: + raise ValueError( + "Precomputed matrix must be square." + " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1]) + ) + self.n_features_in_ = X.shape[1] + + n_samples = X.shape[0] + if n_samples == 0: + raise ValueError("n_samples must be greater than 0") + + if issparse(X): + if self.algorithm not in ("auto", "brute"): + warnings.warn("cannot use tree with sparse input: using brute force") + + if ( + self.effective_metric_ not in VALID_METRICS_SPARSE["brute"] + and not callable(self.effective_metric_) + and not isinstance(self.effective_metric_, DistanceMetric) + ): + raise ValueError( + "Metric '%s' not valid for sparse input. " + "Use sorted(sklearn.neighbors." + "VALID_METRICS_SPARSE['brute']) " + "to get valid options. " + "Metric can also be a callable function." % (self.effective_metric_) + ) + self._fit_X = X.copy() + self._tree = None + self._fit_method = "brute" + self.n_samples_fit_ = X.shape[0] + return self + + self._fit_method = self.algorithm + self._fit_X = X + self.n_samples_fit_ = X.shape[0] + + if self._fit_method == "auto": + # A tree approach is better for small number of neighbors or small + # number of features, with KDTree generally faster when available + if ( + self.metric == "precomputed" + or self._fit_X.shape[1] > 15 + or ( + self.n_neighbors is not None + and self.n_neighbors >= self._fit_X.shape[0] // 2 + ) + ): + self._fit_method = "brute" + else: + if ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_["p"] < 1 + ): + self._fit_method = "brute" + elif ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_.get("w") is not None + ): + # 'minkowski' with weights is not supported by KDTree but is + # supported byBallTree. + self._fit_method = "ball_tree" + elif self.effective_metric_ in VALID_METRICS["kd_tree"]: + self._fit_method = "kd_tree" + elif ( + callable(self.effective_metric_) + or self.effective_metric_ in VALID_METRICS["ball_tree"] + ): + self._fit_method = "ball_tree" + else: + self._fit_method = "brute" + + if ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_["p"] < 1 + ): + # For 0 < p < 1 Minkowski distances aren't valid distance + # metric as they do not satisfy triangular inequality: + # they are semi-metrics. + # algorithm="kd_tree" and algorithm="ball_tree" can't be used because + # KDTree and BallTree require a proper distance metric to work properly. + # However, the brute-force algorithm supports semi-metrics. + if self._fit_method == "brute": + warnings.warn( + "Mind that for 0 < p < 1, Minkowski metrics are not distance" + " metrics. Continuing the execution with `algorithm='brute'`." + ) + else: # self._fit_method in ("kd_tree", "ball_tree") + raise ValueError( + f'algorithm="{self._fit_method}" does not support 0 < p < 1 for ' + "the Minkowski metric. 
To resolve this problem either " + 'set p >= 1 or algorithm="brute".' + ) + + if self._fit_method == "ball_tree": + self._tree = BallTree( + X, + self.leaf_size, + metric=self.effective_metric_, + **self.effective_metric_params_, + ) + elif self._fit_method == "kd_tree": + if ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_.get("w") is not None + ): + raise ValueError( + "algorithm='kd_tree' is not valid for " + "metric='minkowski' with a weight parameter 'w': " + "try algorithm='ball_tree' " + "or algorithm='brute' instead." + ) + self._tree = KDTree( + X, + self.leaf_size, + metric=self.effective_metric_, + **self.effective_metric_params_, + ) + elif self._fit_method == "brute": + self._tree = None + + return self + + def _more_tags(self): + # For cross-validation routines to split data correctly + return {"pairwise": self.metric == "precomputed"} + + +def _tree_query_parallel_helper(tree, *args, **kwargs): + """Helper for the Parallel calls in KNeighborsMixin.kneighbors. + + The Cython method tree.query is not directly picklable by cloudpickle + under PyPy. + """ + return tree.query(*args, **kwargs) + + +class KNeighborsMixin: + """Mixin for k-neighbors searches.""" + + def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance): + """Reduce a chunk of distances to the nearest neighbors. + + Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` + + Parameters + ---------- + dist : ndarray of shape (n_samples_chunk, n_samples) + The distance matrix. + + start : int + The index in X which the first row of dist corresponds to. + + n_neighbors : int + Number of neighbors required for each sample. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + dist : array of shape (n_samples_chunk, n_neighbors) + Returned only if `return_distance=True`. + + neigh : array of shape (n_samples_chunk, n_neighbors) + The neighbors indices. + """ + sample_range = np.arange(dist.shape[0])[:, None] + neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1) + neigh_ind = neigh_ind[:, :n_neighbors] + # argpartition doesn't guarantee sorted order, so we sort again + neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])] + if return_distance: + if self.effective_metric_ == "euclidean": + result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind + else: + result = dist[sample_range, neigh_ind], neigh_ind + else: + result = neigh_ind + return result + + def kneighbors(self, X=None, n_neighbors=None, return_distance=True): + """Find the K-neighbors of a point. + + Returns indices of and distances to the neighbors of each point. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed', default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + + n_neighbors : int, default=None + Number of neighbors required for each sample. The default is the + value passed to the constructor. + + return_distance : bool, default=True + Whether or not to return the distances. + + Returns + ------- + neigh_dist : ndarray of shape (n_queries, n_neighbors) + Array representing the lengths to points, only present if + return_distance=True. + + neigh_ind : ndarray of shape (n_queries, n_neighbors) + Indices of the nearest points in the population matrix. 
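(Illustrative aside on the partial-sort trick used in `_kneighbors_reduce_func` above: `np.argpartition` isolates the k smallest distances without a full sort, and a second `argsort` over only those k entries restores the ordering; the numbers below are made up.)
>>> import numpy as np
>>> dist = np.array([[0.9, 0.1, 0.5, 0.3]])
>>> rows = np.arange(dist.shape[0])[:, None]
>>> neigh_ind = np.argpartition(dist, 1, axis=1)[:, :2]  # two smallest, arbitrary order
>>> neigh_ind = neigh_ind[rows, np.argsort(dist[rows, neigh_ind])]  # ordered by distance
>>> neigh_ind
array([[1, 3]])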
+ + Examples + -------- + In the following example, we construct a NearestNeighbors + class from an array representing our data set and ask who's + the closest point to [1,1,1] + + >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(n_neighbors=1) + >>> neigh.fit(samples) + NearestNeighbors(n_neighbors=1) + >>> print(neigh.kneighbors([[1., 1., 1.]])) + (array([[0.5]]), array([[2]])) + + As you can see, it returns [[0.5]], and [[2]], which means that the + element is at distance 0.5 and is the third element of samples + (indexes start at 0). You can also query for multiple points: + + >>> X = [[0., 1., 0.], [1., 0., 1.]] + >>> neigh.kneighbors(X, return_distance=False) + array([[1], + [2]]...) + """ + check_is_fitted(self) + + if n_neighbors is None: + n_neighbors = self.n_neighbors + elif n_neighbors <= 0: + raise ValueError("Expected n_neighbors > 0. Got %d" % n_neighbors) + elif not isinstance(n_neighbors, numbers.Integral): + raise TypeError( + "n_neighbors does not take %s value, enter integer value" + % type(n_neighbors) + ) + + query_is_train = X is None + if query_is_train: + X = self._fit_X + # Include an extra neighbor to account for the sample itself being + # returned, which is removed later + n_neighbors += 1 + else: + if self.metric == "precomputed": + X = _check_precomputed(X) + else: + X = self._validate_data(X, accept_sparse="csr", reset=False, order="C") + + n_samples_fit = self.n_samples_fit_ + if n_neighbors > n_samples_fit: + if query_is_train: + n_neighbors -= 1 # ok to modify inplace because an error is raised + inequality_str = "n_neighbors < n_samples_fit" + else: + inequality_str = "n_neighbors <= n_samples_fit" + raise ValueError( + f"Expected {inequality_str}, but " + f"n_neighbors = {n_neighbors}, n_samples_fit = {n_samples_fit}, " + f"n_samples = {X.shape[0]}" # include n_samples for common tests + ) + + n_jobs = effective_n_jobs(self.n_jobs) + chunked_results = None + use_pairwise_distances_reductions = ( + self._fit_method == "brute" + and ArgKmin.is_usable_for( + X if X is not None else self._fit_X, self._fit_X, self.effective_metric_ + ) + ) + if use_pairwise_distances_reductions: + results = ArgKmin.compute( + X=X, + Y=self._fit_X, + k=n_neighbors, + metric=self.effective_metric_, + metric_kwargs=self.effective_metric_params_, + strategy="auto", + return_distance=return_distance, + ) + + elif ( + self._fit_method == "brute" and self.metric == "precomputed" and issparse(X) + ): + results = _kneighbors_from_graph( + X, n_neighbors=n_neighbors, return_distance=return_distance + ) + + elif self._fit_method == "brute": + # Joblib-based backend, which is used when user-defined callable + # are passed for metric. + + # This won't be used in the future once PairwiseDistancesReductions + # support: + # - DistanceMetrics which work on supposedly binary data + # - CSR-dense and dense-CSR case if 'euclidean' in metric. 
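(Illustrative aside: the joblib-based brute-force branch below is, among other cases, the path taken when a user-defined callable metric is passed; the `manhattan` function here is a hypothetical example.)
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> def manhattan(a, b):  # hypothetical callable metric over two 1D vectors
...     return np.abs(a - b).sum()
>>> nn = NearestNeighbors(n_neighbors=1, algorithm="brute", metric=manhattan).fit([[0., 0.], [5., 5.]])
>>> nn.kneighbors([[1., 1.]], return_distance=False)
array([[0]])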
+ reduce_func = partial( + self._kneighbors_reduce_func, + n_neighbors=n_neighbors, + return_distance=return_distance, + ) + + # for efficiency, use squared euclidean distances + if self.effective_metric_ == "euclidean": + kwds = {"squared": True} + else: + kwds = self.effective_metric_params_ + + chunked_results = list( + pairwise_distances_chunked( + X, + self._fit_X, + reduce_func=reduce_func, + metric=self.effective_metric_, + n_jobs=n_jobs, + **kwds, + ) + ) + + elif self._fit_method in ["ball_tree", "kd_tree"]: + if issparse(X): + raise ValueError( + "%s does not work with sparse matrices. Densify the data, " + "or set algorithm='brute'" + % self._fit_method + ) + chunked_results = Parallel(n_jobs, prefer="threads")( + delayed(_tree_query_parallel_helper)( + self._tree, X[s], n_neighbors, return_distance + ) + for s in gen_even_slices(X.shape[0], n_jobs) + ) + else: + raise ValueError("internal: _fit_method not recognized") + + if chunked_results is not None: + if return_distance: + neigh_dist, neigh_ind = zip(*chunked_results) + results = np.vstack(neigh_dist), np.vstack(neigh_ind) + else: + results = np.vstack(chunked_results) + + if not query_is_train: + return results + else: + # If the query data is the same as the indexed data, we would like + # to ignore the first nearest neighbor of every sample, i.e + # the sample itself. + if return_distance: + neigh_dist, neigh_ind = results + else: + neigh_ind = results + + n_queries, _ = X.shape + sample_range = np.arange(n_queries)[:, None] + sample_mask = neigh_ind != sample_range + + # Corner case: When the number of duplicates are more + # than the number of neighbors, the first NN will not + # be the sample, but a duplicate. + # In that case mask the first duplicate. + dup_gr_nbrs = np.all(sample_mask, axis=1) + sample_mask[:, 0][dup_gr_nbrs] = False + neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1)) + + if return_distance: + neigh_dist = np.reshape( + neigh_dist[sample_mask], (n_queries, n_neighbors - 1) + ) + return neigh_dist, neigh_ind + return neigh_ind + + def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"): + """Compute the (weighted) graph of k-Neighbors for points in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed', default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + For ``metric='precomputed'`` the shape should be + (n_queries, n_indexed). Otherwise the shape should be + (n_queries, n_features). + + n_neighbors : int, default=None + Number of neighbors for each sample. The default is the value + passed to the constructor. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the + connectivity matrix with ones and zeros, in 'distance' the + edges are distances between points, type of distance + depends on the selected metric parameter in + NearestNeighbors class. + + Returns + ------- + A : sparse-matrix of shape (n_queries, n_samples_fit) + `n_samples_fit` is the number of samples in the fitted data. + `A[i, j]` gives the weight of the edge connecting `i` to `j`. + The matrix is of CSR format. + + See Also + -------- + NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph + of Neighbors for points in X. 
+ + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(n_neighbors=2) + >>> neigh.fit(X) + NearestNeighbors(n_neighbors=2) + >>> A = neigh.kneighbors_graph(X) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 1.], + [1., 0., 1.]]) + """ + check_is_fitted(self) + if n_neighbors is None: + n_neighbors = self.n_neighbors + + # check the input only in self.kneighbors + + # construct CSR matrix representation of the k-NN graph + if mode == "connectivity": + A_ind = self.kneighbors(X, n_neighbors, return_distance=False) + n_queries = A_ind.shape[0] + A_data = np.ones(n_queries * n_neighbors) + + elif mode == "distance": + A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True) + A_data = np.ravel(A_data) + + else: + raise ValueError( + 'Unsupported mode, must be one of "connectivity", ' + f'or "distance" but got "{mode}" instead' + ) + + n_queries = A_ind.shape[0] + n_samples_fit = self.n_samples_fit_ + n_nonzero = n_queries * n_neighbors + A_indptr = np.arange(0, n_nonzero + 1, n_neighbors) + + kneighbors_graph = csr_matrix( + (A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit) + ) + + return kneighbors_graph + + +def _tree_query_radius_parallel_helper(tree, *args, **kwargs): + """Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors. + + The Cython method tree.query_radius is not directly picklable by + cloudpickle under PyPy. + """ + return tree.query_radius(*args, **kwargs) + + +class RadiusNeighborsMixin: + """Mixin for radius-based neighbors searches.""" + + def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance): + """Reduce a chunk of distances to the nearest neighbors. + + Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` + + Parameters + ---------- + dist : ndarray of shape (n_samples_chunk, n_samples) + The distance matrix. + + start : int + The index in X which the first row of dist corresponds to. + + radius : float + The radius considered when making the nearest neighbors search. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + dist : list of ndarray of shape (n_samples_chunk,) + Returned only if `return_distance=True`. + + neigh : list of ndarray of shape (n_samples_chunk,) + The neighbors indices. + """ + neigh_ind = [np.where(d <= radius)[0] for d in dist] + + if return_distance: + if self.effective_metric_ == "euclidean": + dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)] + else: + dist = [d[neigh_ind[i]] for i, d in enumerate(dist)] + results = dist, neigh_ind + else: + results = neigh_ind + return results + + def radius_neighbors( + self, X=None, radius=None, return_distance=True, sort_results=False + ): + """Find the neighbors within a given radius of a point or points. + + Return the indices and distances of each point from the dataset + lying in a ball with size ``radius`` around the points of the query + array. Points lying on the boundary are included in the results. + + The result points are *not* necessarily sorted by distance to their + query point. + + Parameters + ---------- + X : {array-like, sparse matrix} of (n_samples, n_features), default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + + radius : float, default=None + Limiting distance of neighbors to return. The default is the value + passed to the constructor. 
+ + return_distance : bool, default=True + Whether or not to return the distances. + + sort_results : bool, default=False + If True, the distances and indices will be sorted by increasing + distances before being returned. If False, the results may not + be sorted. If `return_distance=False`, setting `sort_results=True` + will result in an error. + + .. versionadded:: 0.22 + + Returns + ------- + neigh_dist : ndarray of shape (n_samples,) of arrays + Array representing the distances to each point, only present if + `return_distance=True`. The distance values are computed according + to the ``metric`` constructor parameter. + + neigh_ind : ndarray of shape (n_samples,) of arrays + An array of arrays of indices of the approximate nearest points + from the population matrix that lie within a ball of size + ``radius`` around the query points. + + Notes + ----- + Because the number of neighbors of each point is not necessarily + equal, the results for multiple query points cannot be fit in a + standard data array. + For efficiency, `radius_neighbors` returns arrays of objects, where + each object is a 1D array of indices or distances. + + Examples + -------- + In the following example, we construct a NeighborsClassifier + class from an array representing our data set and ask who's + the closest point to [1, 1, 1]: + + >>> import numpy as np + >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(radius=1.6) + >>> neigh.fit(samples) + NearestNeighbors(radius=1.6) + >>> rng = neigh.radius_neighbors([[1., 1., 1.]]) + >>> print(np.asarray(rng[0][0])) + [1.5 0.5] + >>> print(np.asarray(rng[1][0])) + [1 2] + + The first array returned contains the distances to all points which + are closer than 1.6, while the second array returned contains their + indices. In general, multiple points can be queried at the same time. + """ + check_is_fitted(self) + + if sort_results and not return_distance: + raise ValueError("return_distance must be True if sort_results is True.") + + query_is_train = X is None + if query_is_train: + X = self._fit_X + else: + if self.metric == "precomputed": + X = _check_precomputed(X) + else: + X = self._validate_data(X, accept_sparse="csr", reset=False, order="C") + + if radius is None: + radius = self.radius + + use_pairwise_distances_reductions = ( + self._fit_method == "brute" + and RadiusNeighbors.is_usable_for( + X if X is not None else self._fit_X, self._fit_X, self.effective_metric_ + ) + ) + + if use_pairwise_distances_reductions: + results = RadiusNeighbors.compute( + X=X, + Y=self._fit_X, + radius=radius, + metric=self.effective_metric_, + metric_kwargs=self.effective_metric_params_, + strategy="auto", + return_distance=return_distance, + sort_results=sort_results, + ) + + elif ( + self._fit_method == "brute" and self.metric == "precomputed" and issparse(X) + ): + results = _radius_neighbors_from_graph( + X, radius=radius, return_distance=return_distance + ) + + elif self._fit_method == "brute": + # Joblib-based backend, which is used when user-defined callable + # are passed for metric. + + # This won't be used in the future once PairwiseDistancesReductions + # support: + # - DistanceMetrics which work on supposedly binary data + # - CSR-dense and dense-CSR case if 'euclidean' in metric. 
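(Illustrative aside: the euclidean branch below squares the radius and filters on squared distances; a quick numeric check of the equivalence it relies on, namely that d <= r exactly when d**2 <= r**2 for non-negative d and r.)
>>> import numpy as np
>>> d, r = np.array([0.5, 1.2, 2.0]), 1.5
>>> np.where(d <= r)[0]
array([0, 1])
>>> np.where(d ** 2 <= r ** 2)[0]
array([0, 1])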
+ + # for efficiency, use squared euclidean distances + if self.effective_metric_ == "euclidean": + radius *= radius + kwds = {"squared": True} + else: + kwds = self.effective_metric_params_ + + reduce_func = partial( + self._radius_neighbors_reduce_func, + radius=radius, + return_distance=return_distance, + ) + + chunked_results = pairwise_distances_chunked( + X, + self._fit_X, + reduce_func=reduce_func, + metric=self.effective_metric_, + n_jobs=self.n_jobs, + **kwds, + ) + if return_distance: + neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results) + neigh_dist_list = sum(neigh_dist_chunks, []) + neigh_ind_list = sum(neigh_ind_chunks, []) + neigh_dist = _to_object_array(neigh_dist_list) + neigh_ind = _to_object_array(neigh_ind_list) + results = neigh_dist, neigh_ind + else: + neigh_ind_list = sum(chunked_results, []) + results = _to_object_array(neigh_ind_list) + + if sort_results: + for ii in range(len(neigh_dist)): + order = np.argsort(neigh_dist[ii], kind="mergesort") + neigh_ind[ii] = neigh_ind[ii][order] + neigh_dist[ii] = neigh_dist[ii][order] + results = neigh_dist, neigh_ind + + elif self._fit_method in ["ball_tree", "kd_tree"]: + if issparse(X): + raise ValueError( + "%s does not work with sparse matrices. Densify the data, " + "or set algorithm='brute'" + % self._fit_method + ) + + n_jobs = effective_n_jobs(self.n_jobs) + delayed_query = delayed(_tree_query_radius_parallel_helper) + chunked_results = Parallel(n_jobs, prefer="threads")( + delayed_query( + self._tree, X[s], radius, return_distance, sort_results=sort_results + ) + for s in gen_even_slices(X.shape[0], n_jobs) + ) + if return_distance: + neigh_ind, neigh_dist = tuple(zip(*chunked_results)) + results = np.hstack(neigh_dist), np.hstack(neigh_ind) + else: + results = np.hstack(chunked_results) + else: + raise ValueError("internal: _fit_method not recognized") + + if not query_is_train: + return results + else: + # If the query data is the same as the indexed data, we would like + # to ignore the first nearest neighbor of every sample, i.e + # the sample itself. + if return_distance: + neigh_dist, neigh_ind = results + else: + neigh_ind = results + + for ind, ind_neighbor in enumerate(neigh_ind): + mask = ind_neighbor != ind + + neigh_ind[ind] = ind_neighbor[mask] + if return_distance: + neigh_dist[ind] = neigh_dist[ind][mask] + + if return_distance: + return neigh_dist, neigh_ind + return neigh_ind + + def radius_neighbors_graph( + self, X=None, radius=None, mode="connectivity", sort_results=False + ): + """Compute the (weighted) graph of Neighbors for points in X. + + Neighborhoods are restricted the points at a distance lower than + radius. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + + radius : float, default=None + Radius of neighborhoods. The default is the value passed to the + constructor. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the + connectivity matrix with ones and zeros, in 'distance' the + edges are distances between points, type of distance + depends on the selected metric parameter in + NearestNeighbors class. + + sort_results : bool, default=False + If True, in each row of the result, the non-zero entries will be + sorted by increasing distances. If False, the non-zero entries may + not be sorted. 
Only used with mode='distance'. + + .. versionadded:: 0.22 + + Returns + ------- + A : sparse-matrix of shape (n_queries, n_samples_fit) + `n_samples_fit` is the number of samples in the fitted data. + `A[i, j]` gives the weight of the edge connecting `i` to `j`. + The matrix is of CSR format. + + See Also + -------- + kneighbors_graph : Compute the (weighted) graph of k-Neighbors for + points in X. + + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(radius=1.5) + >>> neigh.fit(X) + NearestNeighbors(radius=1.5) + >>> A = neigh.radius_neighbors_graph(X) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 0.], + [1., 0., 1.]]) + """ + check_is_fitted(self) + + # check the input only in self.radius_neighbors + + if radius is None: + radius = self.radius + + # construct CSR matrix representation of the NN graph + if mode == "connectivity": + A_ind = self.radius_neighbors(X, radius, return_distance=False) + A_data = None + elif mode == "distance": + dist, A_ind = self.radius_neighbors( + X, radius, return_distance=True, sort_results=sort_results + ) + A_data = np.concatenate(list(dist)) + else: + raise ValueError( + 'Unsupported mode, must be one of "connectivity", ' + f'or "distance" but got "{mode}" instead' + ) + + n_queries = A_ind.shape[0] + n_samples_fit = self.n_samples_fit_ + n_neighbors = np.array([len(a) for a in A_ind]) + A_ind = np.concatenate(list(A_ind)) + if A_data is None: + A_data = np.ones(len(A_ind)) + A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors))) + + return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit)) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_classification.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..26ffa273d0a60b68f51eb4edff0e83e188cb6d5c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_classification.py @@ -0,0 +1,839 @@ +"""Nearest Neighbor Classification""" + +# Authors: Jake Vanderplas +# Fabian Pedregosa +# Alexandre Gramfort +# Sparseness support by Lars Buitinck +# Multi-output support by Arnaud Joly +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam +import warnings +from numbers import Integral + +import numpy as np + +from sklearn.neighbors._base import _check_precomputed + +from ..base import ClassifierMixin, _fit_context +from ..metrics._pairwise_distances_reduction import ( + ArgKminClassMode, + RadiusNeighborsClassMode, +) +from ..utils._param_validation import StrOptions +from ..utils.arrayfuncs import _all_with_any_reduction_axis_1 +from ..utils.extmath import weighted_mode +from ..utils.fixes import _mode +from ..utils.validation import _is_arraylike, _num_samples, check_is_fitted +from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights + + +def _adjusted_metric(metric, metric_kwargs, p=None): + metric_kwargs = metric_kwargs or {} + if metric == "minkowski": + metric_kwargs["p"] = p + if p == 2: + metric = "euclidean" + return metric, metric_kwargs + + +class KNeighborsClassifier(KNeighborsMixin, ClassifierMixin, NeighborsBase): + """Classifier implementing the k-nearest neighbors vote. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to use by default for :meth:`kneighbors` queries. 
+ + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. + - [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + Refer to the example entitled + :ref:`sphx_glr_auto_examples_neighbors_plot_classification.py` + showing the impact of the `weights` parameter on the decision + boundary. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is equivalent + to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. + For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected + to be positive. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + Doesn't affect :meth:`fit` method. + + Attributes + ---------- + classes_ : array of shape (n_classes,) + Class labels known to the classifier + + effective_metric_ : str or callble + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. 
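(Illustrative aside on the `effective_metric_` attribute described above: with `metric='minkowski'` and `p=2`, the fitted estimator reports the equivalent Euclidean metric.)
>>> from sklearn.neighbors import KNeighborsClassifier
>>> knn = KNeighborsClassifier(n_neighbors=1, metric="minkowski", p=2).fit([[0.], [1.]], [0, 1])
>>> knn.effective_metric_
'euclidean'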
+ + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + outputs_2d_ : bool + False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit + otherwise True. + + See Also + -------- + RadiusNeighborsClassifier: Classifier based on neighbors within a fixed radius. + KNeighborsRegressor: Regression based on k-nearest neighbors. + RadiusNeighborsRegressor: Regression based on neighbors within a fixed radius. + NearestNeighbors: Unsupervised learner for implementing neighbor searches. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + .. warning:: + + Regarding the Nearest Neighbors algorithms, if it is found that two + neighbors, neighbor `k+1` and `k`, have identical distances + but different labels, the results will depend on the ordering of the + training data. + + https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import KNeighborsClassifier + >>> neigh = KNeighborsClassifier(n_neighbors=3) + >>> neigh.fit(X, y) + KNeighborsClassifier(...) + >>> print(neigh.predict([[1.1]])) + [0] + >>> print(neigh.predict_proba([[0.9]])) + [[0.666... 0.333...]] + """ + + _parameter_constraints: dict = {**NeighborsBase._parameter_constraints} + _parameter_constraints.pop("radius") + _parameter_constraints.update( + {"weights": [StrOptions({"uniform", "distance"}), callable, None]} + ) + + def __init__( + self, + n_neighbors=5, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + metric_params=None, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + + @_fit_context( + # KNeighborsClassifier.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the k-nearest neighbors classifier from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : KNeighborsClassifier + The fitted k-nearest neighbors classifier. + """ + return self._fit(X, y) + + def predict(self, X): + """Predict the class labels for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs) + Class labels for each data sample. 
+ """ + check_is_fitted(self, "_fit_method") + if self.weights == "uniform": + if self._fit_method == "brute" and ArgKminClassMode.is_usable_for( + X, self._fit_X, self.metric + ): + probabilities = self.predict_proba(X) + if self.outputs_2d_: + return np.stack( + [ + self.classes_[idx][np.argmax(probas, axis=1)] + for idx, probas in enumerate(probabilities) + ], + axis=1, + ) + return self.classes_[np.argmax(probabilities, axis=1)] + # In that case, we do not need the distances to perform + # the weighting so we do not compute them. + neigh_ind = self.kneighbors(X, return_distance=False) + neigh_dist = None + else: + neigh_dist, neigh_ind = self.kneighbors(X) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + n_outputs = len(classes_) + n_queries = _num_samples(X) + weights = _get_weights(neigh_dist, self.weights) + if weights is not None and _all_with_any_reduction_axis_1(weights, value=0): + raise ValueError( + "All neighbors of some sample is getting zero weights. " + "Please modify 'weights' to avoid this case if you are " + "using a user-defined function." + ) + + y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype) + for k, classes_k in enumerate(classes_): + if weights is None: + mode, _ = _mode(_y[neigh_ind, k], axis=1) + else: + mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1) + + mode = np.asarray(mode.ravel(), dtype=np.intp) + y_pred[:, k] = classes_k.take(mode) + + if not self.outputs_2d_: + y_pred = y_pred.ravel() + + return y_pred + + def predict_proba(self, X): + """Return probability estimates for the test data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \ + of such arrays if n_outputs > 1. + The class probabilities of the input samples. Classes are ordered + by lexicographic order. + """ + check_is_fitted(self, "_fit_method") + if self.weights == "uniform": + # TODO: systematize this mapping of metric for + # PairwiseDistancesReductions. + metric, metric_kwargs = _adjusted_metric( + metric=self.metric, metric_kwargs=self.metric_params, p=self.p + ) + if ( + self._fit_method == "brute" + and ArgKminClassMode.is_usable_for(X, self._fit_X, metric) + # TODO: Implement efficient multi-output solution + and not self.outputs_2d_ + ): + if self.metric == "precomputed": + X = _check_precomputed(X) + else: + X = self._validate_data( + X, accept_sparse="csr", reset=False, order="C" + ) + + probabilities = ArgKminClassMode.compute( + X, + self._fit_X, + k=self.n_neighbors, + weights=self.weights, + Y_labels=self._y, + unique_Y_labels=self.classes_, + metric=metric, + metric_kwargs=metric_kwargs, + # `strategy="parallel_on_X"` has in practice be shown + # to be more efficient than `strategy="parallel_on_Y`` + # on many combination of datasets. + # Hence, we choose to enforce it here. + # For more information, see: + # https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342 # noqa + # TODO: adapt the heuristic for `strategy="auto"` for + # `ArgKminClassMode` and use `strategy="auto"`. + strategy="parallel_on_X", + ) + return probabilities + + # In that case, we do not need the distances to perform + # the weighting so we do not compute them. 
+ neigh_ind = self.kneighbors(X, return_distance=False) + neigh_dist = None + else: + neigh_dist, neigh_ind = self.kneighbors(X) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + n_queries = _num_samples(X) + + weights = _get_weights(neigh_dist, self.weights) + if weights is None: + weights = np.ones_like(neigh_ind) + elif _all_with_any_reduction_axis_1(weights, value=0): + raise ValueError( + "All neighbors of some sample is getting zero weights. " + "Please modify 'weights' to avoid this case if you are " + "using a user-defined function." + ) + + all_rows = np.arange(n_queries) + probabilities = [] + for k, classes_k in enumerate(classes_): + pred_labels = _y[:, k][neigh_ind] + proba_k = np.zeros((n_queries, classes_k.size)) + + # a simple ':' index doesn't work right + for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors) + proba_k[all_rows, idx] += weights[:, i] + + # normalize 'votes' into real [0,1] probabilities + normalizer = proba_k.sum(axis=1)[:, np.newaxis] + proba_k /= normalizer + + probabilities.append(proba_k) + + if not self.outputs_2d_: + probabilities = probabilities[0] + + return probabilities + + def _more_tags(self): + return {"multilabel": True} + + +class RadiusNeighborsClassifier(RadiusNeighborsMixin, ClassifierMixin, NeighborsBase): + """Classifier implementing a vote among neighbors within a given radius. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + radius : float, default=1.0 + Range of parameter space to use by default for :meth:`radius_neighbors` + queries. + + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. + - [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + Uniform weights are used by default. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. 
See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + outlier_label : {manual label, 'most_frequent'}, default=None + Label for outlier samples (samples with no neighbors in given radius). + + - manual label: str or int label (should be the same type as y) + or list of manual labels if multi-output is used. + - 'most_frequent' : assign the most frequent label of y to outliers. + - None : when any outlier is detected, ValueError will be raised. + + The outlier label should be selected from among the unique 'Y' labels. + If it is specified with a different value a warning will be raised and + all class probabilities of outliers will be assigned to be 0. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier. + + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + outlier_label_ : int or array-like of shape (n_class,) + Label which is given for outlier samples (samples with no neighbors + on given radius). + + outputs_2d_ : bool + False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit + otherwise True. + + See Also + -------- + KNeighborsClassifier : Classifier implementing the k-nearest neighbors + vote. + RadiusNeighborsRegressor : Regression based on neighbors within a + fixed radius. + KNeighborsRegressor : Regression based on k-nearest neighbors. + NearestNeighbors : Unsupervised learner for implementing neighbor + searches. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. 
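+
+ A rough illustrative sketch of how ``weights='distance'`` changes the vote:
+ closer neighbours within the radius count for more.
+
+ >>> from sklearn.neighbors import RadiusNeighborsClassifier
+ >>> clf = RadiusNeighborsClassifier(radius=2.0, weights='distance')
+ >>> clf.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
+ RadiusNeighborsClassifier(...)
+ >>> print(clf.predict([[1.4]]))
+ [0]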
+ + https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import RadiusNeighborsClassifier + >>> neigh = RadiusNeighborsClassifier(radius=1.0) + >>> neigh.fit(X, y) + RadiusNeighborsClassifier(...) + >>> print(neigh.predict([[1.5]])) + [0] + >>> print(neigh.predict_proba([[1.0]])) + [[0.66666667 0.33333333]] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "weights": [StrOptions({"uniform", "distance"}), callable, None], + "outlier_label": [Integral, str, "array-like", None], + } + _parameter_constraints.pop("n_neighbors") + + def __init__( + self, + radius=1.0, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + outlier_label=None, + metric_params=None, + n_jobs=None, + ): + super().__init__( + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + self.outlier_label = outlier_label + + @_fit_context( + # RadiusNeighborsClassifier.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the radius neighbors classifier from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : RadiusNeighborsClassifier + The fitted radius neighbors classifier. + """ + self._fit(X, y) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + if self.outlier_label is None: + outlier_label_ = None + + elif self.outlier_label == "most_frequent": + outlier_label_ = [] + # iterate over multi-output, get the most frequent label for each + # output. + for k, classes_k in enumerate(classes_): + label_count = np.bincount(_y[:, k]) + outlier_label_.append(classes_k[label_count.argmax()]) + + else: + if _is_arraylike(self.outlier_label) and not isinstance( + self.outlier_label, str + ): + if len(self.outlier_label) != len(classes_): + raise ValueError( + "The length of outlier_label: {} is " + "inconsistent with the output " + "length: {}".format(self.outlier_label, len(classes_)) + ) + outlier_label_ = self.outlier_label + else: + outlier_label_ = [self.outlier_label] * len(classes_) + + for classes, label in zip(classes_, outlier_label_): + if _is_arraylike(label) and not isinstance(label, str): + # ensure the outlier label for each output is a scalar. + raise TypeError( + "The outlier_label of classes {} is " + "supposed to be a scalar, got " + "{}.".format(classes, label) + ) + if np.append(classes, label).dtype != classes.dtype: + # ensure the dtype of outlier label is consistent with y. + raise TypeError( + "The dtype of outlier_label {} is " + "inconsistent with classes {} in " + "y.".format(label, classes) + ) + + self.outlier_label_ = outlier_label_ + + return self + + def predict(self, X): + """Predict the class labels for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs) + Class labels for each data sample. 
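+
+ Examples
+ --------
+ A small illustrative sketch: every training point that lies within
+ ``radius`` of a query takes part in the vote.
+
+ >>> from sklearn.neighbors import RadiusNeighborsClassifier
+ >>> clf = RadiusNeighborsClassifier(radius=1.0)
+ >>> clf.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
+ RadiusNeighborsClassifier(...)
+ >>> clf.predict([[0.6], [2.6]])
+ array([0, 1])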
+ """ + + probs = self.predict_proba(X) + classes_ = self.classes_ + + if not self.outputs_2d_: + probs = [probs] + classes_ = [self.classes_] + + n_outputs = len(classes_) + n_queries = probs[0].shape[0] + y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype) + + for k, prob in enumerate(probs): + # iterate over multi-output, assign labels based on probabilities + # of each output. + max_prob_index = prob.argmax(axis=1) + y_pred[:, k] = classes_[k].take(max_prob_index) + + outlier_zero_probs = (prob == 0).all(axis=1) + if outlier_zero_probs.any(): + zero_prob_index = np.flatnonzero(outlier_zero_probs) + y_pred[zero_prob_index, k] = self.outlier_label_[k] + + if not self.outputs_2d_: + y_pred = y_pred.ravel() + + return y_pred + + def predict_proba(self, X): + """Return probability estimates for the test data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + p : ndarray of shape (n_queries, n_classes), or a list of \ + n_outputs of such arrays if n_outputs > 1. + The class probabilities of the input samples. Classes are ordered + by lexicographic order. + """ + check_is_fitted(self, "_fit_method") + n_queries = _num_samples(X) + + metric, metric_kwargs = _adjusted_metric( + metric=self.metric, metric_kwargs=self.metric_params, p=self.p + ) + + if ( + self.weights == "uniform" + and self._fit_method == "brute" + and not self.outputs_2d_ + and RadiusNeighborsClassMode.is_usable_for(X, self._fit_X, metric) + ): + probabilities = RadiusNeighborsClassMode.compute( + X=X, + Y=self._fit_X, + radius=self.radius, + weights=self.weights, + Y_labels=self._y, + unique_Y_labels=self.classes_, + outlier_label=self.outlier_label, + metric=metric, + metric_kwargs=metric_kwargs, + strategy="parallel_on_X", + # `strategy="parallel_on_X"` has in practice be shown + # to be more efficient than `strategy="parallel_on_Y`` + # on many combination of datasets. + # Hence, we choose to enforce it here. + # For more information, see: + # https://github.com/scikit-learn/scikit-learn/pull/26828/files#r1282398471 # noqa + ) + return probabilities + + neigh_dist, neigh_ind = self.radius_neighbors(X) + outlier_mask = np.zeros(n_queries, dtype=bool) + outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind] + outliers = np.flatnonzero(outlier_mask) + inliers = np.flatnonzero(~outlier_mask) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + if self.outlier_label_ is None and outliers.size > 0: + raise ValueError( + "No neighbors found for test samples %r, " + "you can try using larger radius, " + "giving a label for outliers, " + "or considering removing them from your dataset." % outliers + ) + + weights = _get_weights(neigh_dist, self.weights) + if weights is not None: + weights = weights[inliers] + + probabilities = [] + # iterate over multi-output, measure probabilities of the k-th output. 
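+ # For each output, neighbor labels are counted per class with np.bincount
+ # (optionally multiplied by the distance-based weights), queries without
+ # any neighbor keep all-zero counts and are resolved through
+ # `outlier_label_`, and the counts are finally normalized row-wise into
+ # probabilities.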
+ for k, classes_k in enumerate(classes_): + pred_labels = np.zeros(len(neigh_ind), dtype=object) + pred_labels[:] = [_y[ind, k] for ind in neigh_ind] + + proba_k = np.zeros((n_queries, classes_k.size)) + proba_inl = np.zeros((len(inliers), classes_k.size)) + + # samples have different size of neighbors within the same radius + if weights is None: + for i, idx in enumerate(pred_labels[inliers]): + proba_inl[i, :] = np.bincount(idx, minlength=classes_k.size) + else: + for i, idx in enumerate(pred_labels[inliers]): + proba_inl[i, :] = np.bincount( + idx, weights[i], minlength=classes_k.size + ) + proba_k[inliers, :] = proba_inl + + if outliers.size > 0: + _outlier_label = self.outlier_label_[k] + label_index = np.flatnonzero(classes_k == _outlier_label) + if label_index.size == 1: + proba_k[outliers, label_index[0]] = 1.0 + else: + warnings.warn( + "Outlier label {} is not in training " + "classes. All class probabilities of " + "outliers will be assigned with 0." + "".format(self.outlier_label_[k]) + ) + + # normalize 'votes' into real [0,1] probabilities + normalizer = proba_k.sum(axis=1)[:, np.newaxis] + normalizer[normalizer == 0.0] = 1.0 + proba_k /= normalizer + + probabilities.append(proba_k) + + if not self.outputs_2d_: + probabilities = probabilities[0] + + return probabilities + + def _more_tags(self): + return {"multilabel": True} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_graph.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff27d07514e05d4d6edc2fb5c9a8461ed4defd1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_graph.py @@ -0,0 +1,719 @@ +"""Nearest Neighbors graph functions""" + +# Author: Jake Vanderplas +# Tom Dupre la Tour +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam +import itertools + +from ..base import ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context +from ..utils._param_validation import ( + Integral, + Interval, + Real, + StrOptions, + validate_params, +) +from ..utils.validation import check_is_fitted +from ._base import VALID_METRICS, KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin +from ._unsupervised import NearestNeighbors + + +def _check_params(X, metric, p, metric_params): + """Check the validity of the input parameters""" + params = zip(["metric", "p", "metric_params"], [metric, p, metric_params]) + est_params = X.get_params() + for param_name, func_param in params: + if func_param != est_params[param_name]: + raise ValueError( + "Got %s for %s, while the estimator has %s for the same parameter." 
+ % (func_param, param_name, est_params[param_name]) + ) + + +def _query_include_self(X, include_self, mode): + """Return the query based on include_self param""" + if include_self == "auto": + include_self = mode == "connectivity" + + # it does not include each sample as its own neighbors + if not include_self: + X = None + + return X + + +@validate_params( + { + "X": ["array-like", KNeighborsMixin], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "mode": [StrOptions({"connectivity", "distance"})], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "include_self": ["boolean", StrOptions({"auto"})], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def kneighbors_graph( + X, + n_neighbors, + *, + mode="connectivity", + metric="minkowski", + p=2, + metric_params=None, + include_self=False, + n_jobs=None, +): + """Compute the (weighted) graph of k-Neighbors for points in X. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Sample data. + + n_neighbors : int + Number of neighbors for each sample. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + metric : str, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is equivalent + to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. + For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected + to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + include_self : bool or 'auto', default=False + Whether or not to mark each sample as the first nearest neighbor to + itself. If 'auto', then True is used for mode='connectivity' and False + for mode='distance'. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix of shape (n_samples, n_samples) + Graph where A[i, j] is assigned the weight of edge that + connects i to j. The matrix is of CSR format. + + See Also + -------- + radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X. 
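+
+ Notes
+ -----
+ A small illustrative sketch: with ``mode='distance'`` the stored entries of
+ the returned CSR matrix hold the distances themselves rather than ones.
+
+ >>> from sklearn.neighbors import kneighbors_graph
+ >>> D = kneighbors_graph([[0], [3], [1]], 2, mode='distance', include_self=False)
+ >>> D.toarray()
+ array([[0., 3., 1.],
+        [3., 0., 2.],
+        [1., 2., 0.]])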
+ + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import kneighbors_graph + >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 1.], + [1., 0., 1.]]) + """ + if not isinstance(X, KNeighborsMixin): + X = NearestNeighbors( + n_neighbors=n_neighbors, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ).fit(X) + else: + _check_params(X, metric, p, metric_params) + + query = _query_include_self(X._fit_X, include_self, mode) + return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode) + + +@validate_params( + { + "X": ["array-like", RadiusNeighborsMixin], + "radius": [Interval(Real, 0, None, closed="both")], + "mode": [StrOptions({"connectivity", "distance"})], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "include_self": ["boolean", StrOptions({"auto"})], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def radius_neighbors_graph( + X, + radius, + *, + mode="connectivity", + metric="minkowski", + p=2, + metric_params=None, + include_self=False, + n_jobs=None, +): + """Compute the (weighted) graph of Neighbors for points in X. + + Neighborhoods are restricted the points at a distance lower than + radius. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Sample data. + + radius : float + Radius of neighborhoods. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + metric : str, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + include_self : bool or 'auto', default=False + Whether or not to mark each sample as the first nearest neighbor to + itself. If 'auto', then True is used for mode='connectivity' and False + for mode='distance'. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix of shape (n_samples, n_samples) + Graph where A[i, j] is assigned the weight of edge that connects + i to j. The matrix is of CSR format. + + See Also + -------- + kneighbors_graph: Compute the weighted graph of k-neighbors for points in X. + + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import radius_neighbors_graph + >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', + ... 
include_self=True) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 0.], + [1., 0., 1.]]) + """ + if not isinstance(X, RadiusNeighborsMixin): + X = NearestNeighbors( + radius=radius, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ).fit(X) + else: + _check_params(X, metric, p, metric_params) + + query = _query_include_self(X._fit_X, include_self, mode) + return X.radius_neighbors_graph(query, radius, mode) + + +class KNeighborsTransformer( + ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase +): + """Transform X into a (weighted) graph of k nearest neighbors. + + The transformed data is a sparse graph as returned by kneighbors_graph. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + mode : {'distance', 'connectivity'}, default='distance' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + n_neighbors : int, default=5 + Number of neighbors for each sample in the transformed sparse graph. + For compatibility reasons, as each sample is considered as its own + neighbor, one extra neighbor will be computed when mode == 'distance'. + In this case, the sparse graph contains (n_neighbors + 1) neighbors. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + If ``-1``, then the number of jobs is set to the number of CPU cores. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. 
+ + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + kneighbors_graph : Compute the weighted graph of k-neighbors for + points in X. + RadiusNeighborsTransformer : Transform X into a weighted graph of + neighbors nearer than a radius. + + Notes + ----- + For an example of using :class:`~sklearn.neighbors.KNeighborsTransformer` + in combination with :class:`~sklearn.manifold.TSNE` see + :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`. + + Examples + -------- + >>> from sklearn.datasets import load_wine + >>> from sklearn.neighbors import KNeighborsTransformer + >>> X, _ = load_wine(return_X_y=True) + >>> X.shape + (178, 13) + >>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance') + >>> X_dist_graph = transformer.fit_transform(X) + >>> X_dist_graph.shape + (178, 178) + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "mode": [StrOptions({"distance", "connectivity"})], + } + _parameter_constraints.pop("radius") + + def __init__( + self, + *, + mode="distance", + n_neighbors=5, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super(KNeighborsTransformer, self).__init__( + n_neighbors=n_neighbors, + radius=None, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.mode = mode + + @_fit_context( + # KNeighborsTransformer.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the k-nearest neighbors transformer from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : KNeighborsTransformer + The fitted k-nearest neighbors transformer. + """ + self._fit(X) + self._n_features_out = self.n_samples_fit_ + return self + + def transform(self, X): + """Compute the (weighted) graph of Neighbors for points in X. + + Parameters + ---------- + X : array-like of shape (n_samples_transform, n_features) + Sample data. + + Returns + ------- + Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + check_is_fitted(self) + add_one = self.mode == "distance" + return self.kneighbors_graph( + X, mode=self.mode, n_neighbors=self.n_neighbors + add_one + ) + + def fit_transform(self, X, y=None): + """Fit to data, then transform it. + + Fits transformer to X and y with optional parameters fit_params + and returns a transformed version of X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. 
+ + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : sparse matrix of shape (n_samples, n_samples) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + return self.fit(X).transform(X) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_methods_sample_order_invariance": "check is not applicable." + } + } + + +class RadiusNeighborsTransformer( + ClassNamePrefixFeaturesOutMixin, + RadiusNeighborsMixin, + TransformerMixin, + NeighborsBase, +): + """Transform X into a (weighted) graph of neighbors nearer than a radius. + + The transformed data is a sparse graph as returned by + `radius_neighbors_graph`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + mode : {'distance', 'connectivity'}, default='distance' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + radius : float, default=1.0 + Radius of neighborhood in the transformed sparse graph. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + If ``-1``, then the number of jobs is set to the number of CPU cores. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. 
For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + kneighbors_graph : Compute the weighted graph of k-neighbors for + points in X. + KNeighborsTransformer : Transform X into a weighted graph of k + nearest neighbors. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import load_wine + >>> from sklearn.cluster import DBSCAN + >>> from sklearn.neighbors import RadiusNeighborsTransformer + >>> from sklearn.pipeline import make_pipeline + >>> X, _ = load_wine(return_X_y=True) + >>> estimator = make_pipeline( + ... RadiusNeighborsTransformer(radius=42.0, mode='distance'), + ... DBSCAN(eps=25.0, metric='precomputed')) + >>> X_clustered = estimator.fit_predict(X) + >>> clusters, counts = np.unique(X_clustered, return_counts=True) + >>> print(counts) + [ 29 15 111 11 12] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "mode": [StrOptions({"distance", "connectivity"})], + } + _parameter_constraints.pop("n_neighbors") + + def __init__( + self, + *, + mode="distance", + radius=1.0, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super(RadiusNeighborsTransformer, self).__init__( + n_neighbors=None, + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.mode = mode + + @_fit_context( + # RadiusNeighborsTransformer.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the radius neighbors transformer from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : RadiusNeighborsTransformer + The fitted radius neighbors transformer. + """ + self._fit(X) + self._n_features_out = self.n_samples_fit_ + return self + + def transform(self, X): + """Compute the (weighted) graph of Neighbors for points in X. + + Parameters + ---------- + X : array-like of shape (n_samples_transform, n_features) + Sample data. + + Returns + ------- + Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + check_is_fitted(self) + return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True) + + def fit_transform(self, X, y=None): + """Fit to data, then transform it. + + Fits transformer to X and y with optional parameters fit_params + and returns a transformed version of X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present for API consistency by convention. 
+ + Returns + ------- + Xt : sparse matrix of shape (n_samples, n_samples) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + return self.fit(X).transform(X) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_methods_sample_order_invariance": "check is not applicable." + } + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ac526d5f5f775cd1401e7196302edea1bf61ac6a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kde.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kde.py new file mode 100644 index 0000000000000000000000000000000000000000..8885fb4c8c5d0a756d614c8e93e2d27ea242fe82 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kde.py @@ -0,0 +1,365 @@ +""" +Kernel Density Estimation +------------------------- +""" +# Author: Jake Vanderplas +import itertools +from numbers import Integral, Real + +import numpy as np +from scipy.special import gammainc + +from ..base import BaseEstimator, _fit_context +from ..neighbors._base import VALID_METRICS +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import row_norms +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._ball_tree import BallTree +from ._kd_tree import KDTree + +VALID_KERNELS = [ + "gaussian", + "tophat", + "epanechnikov", + "exponential", + "linear", + "cosine", +] + +TREE_DICT = {"ball_tree": BallTree, "kd_tree": KDTree} + + +# TODO: implement a brute force version for testing purposes +# TODO: create a density estimation base class? +class KernelDensity(BaseEstimator): + """Kernel Density Estimation. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + bandwidth : float or {"scott", "silverman"}, default=1.0 + The bandwidth of the kernel. If bandwidth is a float, it defines the + bandwidth of the kernel. If bandwidth is a string, one of the estimation + methods is implemented. + + algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto' + The tree algorithm to use. + + kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \ + 'cosine'}, default='gaussian' + The kernel to use. + + metric : str, default='euclidean' + Metric to use for distance computation. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + Not all metrics are valid with all algorithms: refer to the + documentation of :class:`BallTree` and :class:`KDTree`. Note that the + normalization of the density output is correct only for the Euclidean + distance metric. + + atol : float, default=0 + The desired absolute tolerance of the result. A larger tolerance will + generally lead to faster execution. + + rtol : float, default=0 + The desired relative tolerance of the result. A larger tolerance will + generally lead to faster execution. + + breadth_first : bool, default=True + If true (default), use a breadth-first approach to the problem. 
+ Otherwise use a depth-first approach. + + leaf_size : int, default=40 + Specify the leaf size of the underlying tree. See :class:`BallTree` + or :class:`KDTree` for details. + + metric_params : dict, default=None + Additional parameters to be passed to the tree for use with the + metric. For more information, see the documentation of + :class:`BallTree` or :class:`KDTree`. + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + tree_ : ``BinaryTree`` instance + The tree algorithm for fast generalized N-point problems. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + bandwidth_ : float + Value of the bandwidth, given directly by the bandwidth parameter or + estimated using the 'scott' or 'silverman' method. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point + problems. + sklearn.neighbors.BallTree : Ball tree for fast generalized N-point + problems. + + Examples + -------- + Compute a gaussian kernel density estimate with a fixed bandwidth. + + >>> from sklearn.neighbors import KernelDensity + >>> import numpy as np + >>> rng = np.random.RandomState(42) + >>> X = rng.random_sample((100, 3)) + >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X) + >>> log_density = kde.score_samples(X[:3]) + >>> log_density + array([-1.52955942, -1.51462041, -1.60244657]) + """ + + _parameter_constraints: dict = { + "bandwidth": [ + Interval(Real, 0, None, closed="neither"), + StrOptions({"scott", "silverman"}), + ], + "algorithm": [StrOptions(set(TREE_DICT.keys()) | {"auto"})], + "kernel": [StrOptions(set(VALID_KERNELS))], + "metric": [ + StrOptions( + set(itertools.chain(*[VALID_METRICS[alg] for alg in TREE_DICT.keys()])) + ) + ], + "atol": [Interval(Real, 0, None, closed="left")], + "rtol": [Interval(Real, 0, None, closed="left")], + "breadth_first": ["boolean"], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "metric_params": [None, dict], + } + + def __init__( + self, + *, + bandwidth=1.0, + algorithm="auto", + kernel="gaussian", + metric="euclidean", + atol=0, + rtol=0, + breadth_first=True, + leaf_size=40, + metric_params=None, + ): + self.algorithm = algorithm + self.bandwidth = bandwidth + self.kernel = kernel + self.metric = metric + self.atol = atol + self.rtol = rtol + self.breadth_first = breadth_first + self.leaf_size = leaf_size + self.metric_params = metric_params + + def _choose_algorithm(self, algorithm, metric): + # given the algorithm string + metric string, choose the optimal + # algorithm to compute the result. + if algorithm == "auto": + # use KD Tree if possible + if metric in KDTree.valid_metrics: + return "kd_tree" + elif metric in BallTree.valid_metrics: + return "ball_tree" + else: # kd_tree or ball_tree + if metric not in TREE_DICT[algorithm].valid_metrics: + raise ValueError( + "invalid metric for {0}: '{1}'".format(TREE_DICT[algorithm], metric) + ) + return algorithm + + @_fit_context( + # KernelDensity.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, sample_weight=None): + """Fit the Kernel Density model on the data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : None + Ignored. 
This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + sample_weight : array-like of shape (n_samples,), default=None + List of sample weights attached to the data X. + + .. versionadded:: 0.20 + + Returns + ------- + self : object + Returns the instance itself. + """ + algorithm = self._choose_algorithm(self.algorithm, self.metric) + + if isinstance(self.bandwidth, str): + if self.bandwidth == "scott": + self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4)) + elif self.bandwidth == "silverman": + self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** ( + -1 / (X.shape[1] + 4) + ) + else: + self.bandwidth_ = self.bandwidth + + X = self._validate_data(X, order="C", dtype=np.float64) + + if sample_weight is not None: + sample_weight = _check_sample_weight( + sample_weight, X, dtype=np.float64, only_non_negative=True + ) + + kwargs = self.metric_params + if kwargs is None: + kwargs = {} + self.tree_ = TREE_DICT[algorithm]( + X, + metric=self.metric, + leaf_size=self.leaf_size, + sample_weight=sample_weight, + **kwargs, + ) + return self + + def score_samples(self, X): + """Compute the log-likelihood of each sample under the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + An array of points to query. Last dimension should match dimension + of training data (n_features). + + Returns + ------- + density : ndarray of shape (n_samples,) + Log-likelihood of each sample in `X`. These are normalized to be + probability densities, so values will be low for high-dimensional + data. + """ + check_is_fitted(self) + # The returned density is normalized to the number of points. + # For it to be a probability, we must scale it. For this reason + # we'll also scale atol. + X = self._validate_data(X, order="C", dtype=np.float64, reset=False) + if self.tree_.sample_weight is None: + N = self.tree_.data.shape[0] + else: + N = self.tree_.sum_weight + atol_N = self.atol * N + log_density = self.tree_.kernel_density( + X, + h=self.bandwidth_, + kernel=self.kernel, + atol=atol_N, + rtol=self.rtol, + breadth_first=self.breadth_first, + return_log=True, + ) + log_density -= np.log(N) + return log_density + + def score(self, X, y=None): + """Compute the total log-likelihood under the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : None + Ignored. This parameter exists only for compatibility with + :class:`~sklearn.pipeline.Pipeline`. + + Returns + ------- + logprob : float + Total log-likelihood of the data in X. This is normalized to be a + probability density, so the value will be low for high-dimensional + data. + """ + return np.sum(self.score_samples(X)) + + def sample(self, n_samples=1, random_state=None): + """Generate random samples from the model. + + Currently, this is implemented only for gaussian and tophat kernels. + + Parameters + ---------- + n_samples : int, default=1 + Number of samples to generate. + + random_state : int, RandomState instance or None, default=None + Determines random number generation used to generate + random samples. Pass an int for reproducible results + across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + X : array-like of shape (n_samples, n_features) + List of samples. 
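+
+ Examples
+ --------
+ A minimal illustrative sketch: draw new points from a fitted Gaussian
+ KDE (only the shape of the result is shown, since the draws are random).
+
+ >>> import numpy as np
+ >>> from sklearn.neighbors import KernelDensity
+ >>> rng = np.random.RandomState(0)
+ >>> X = rng.random_sample((100, 3))
+ >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
+ >>> kde.sample(n_samples=5, random_state=0).shape
+ (5, 3)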
+ """ + check_is_fitted(self) + # TODO: implement sampling for other valid kernel shapes + if self.kernel not in ["gaussian", "tophat"]: + raise NotImplementedError() + + data = np.asarray(self.tree_.data) + + rng = check_random_state(random_state) + u = rng.uniform(0, 1, size=n_samples) + if self.tree_.sample_weight is None: + i = (u * data.shape[0]).astype(np.int64) + else: + cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight)) + sum_weight = cumsum_weight[-1] + i = np.searchsorted(cumsum_weight, u * sum_weight) + if self.kernel == "gaussian": + return np.atleast_2d(rng.normal(data[i], self.bandwidth_)) + + elif self.kernel == "tophat": + # we first draw points from a d-dimensional normal distribution, + # then use an incomplete gamma function to map them to a uniform + # d-dimensional tophat distribution. + dim = data.shape[1] + X = rng.normal(size=(n_samples, dim)) + s_sq = row_norms(X, squared=True) + correction = ( + gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim) + * self.bandwidth_ + / np.sqrt(s_sq) + ) + return data[i] + X * correction[:, np.newaxis] + + def _more_tags(self): + return { + "_xfail_checks": { + "check_sample_weights_invariance": ( + "sample_weight must have positive values" + ), + } + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_lof.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_lof.py new file mode 100644 index 0000000000000000000000000000000000000000..05dfdb13a1cbe2a6bab62e909b8796e2a91d581e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_lof.py @@ -0,0 +1,516 @@ +# Authors: Nicolas Goix +# Alexandre Gramfort +# License: BSD 3 clause + +import warnings +from numbers import Real + +import numpy as np + +from ..base import OutlierMixin, _fit_context +from ..utils import check_array +from ..utils._param_validation import Interval, StrOptions +from ..utils.metaestimators import available_if +from ..utils.validation import check_is_fitted +from ._base import KNeighborsMixin, NeighborsBase + +__all__ = ["LocalOutlierFactor"] + + +class LocalOutlierFactor(KNeighborsMixin, OutlierMixin, NeighborsBase): + """Unsupervised Outlier Detection using the Local Outlier Factor (LOF). + + The anomaly score of each sample is called the Local Outlier Factor. + It measures the local deviation of the density of a given sample with respect + to its neighbors. + It is local in that the anomaly score depends on how isolated the object + is with respect to the surrounding neighborhood. + More precisely, locality is given by k-nearest neighbors, whose distance + is used to estimate the local density. + By comparing the local density of a sample to the local densities of its + neighbors, one can identify samples that have a substantially lower density + than their neighbors. These are considered outliers. + + .. versionadded:: 0.19 + + Parameters + ---------- + n_neighbors : int, default=20 + Number of neighbors to use by default for :meth:`kneighbors` queries. + If n_neighbors is larger than the number of samples provided, + all samples will be used. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. 
+ + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf is size passed to :class:`BallTree` or :class:`KDTree`. This can + affect the speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + p : float, default=2 + Parameter for the Minkowski metric from + :func:`sklearn.metrics.pairwise_distances`. When p = 1, this + is equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + contamination : 'auto' or float, default='auto' + The amount of contamination of the data set, i.e. the proportion + of outliers in the data set. When fitting this is used to define the + threshold on the scores of the samples. + + - if 'auto', the threshold is determined as in the + original paper, + - if a float, the contamination should be in the range (0, 0.5]. + + .. versionchanged:: 0.22 + The default value of ``contamination`` changed from 0.1 + to ``'auto'``. + + novelty : bool, default=False + By default, LocalOutlierFactor is only meant to be used for outlier + detection (novelty=False). Set novelty to True if you want to use + LocalOutlierFactor for novelty detection. In this case be aware that + you should only use predict, decision_function and score_samples + on new unseen data and not on the training set; and note that the + results obtained this way may differ from the standard LOF results. + + .. versionadded:: 0.20 + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + negative_outlier_factor_ : ndarray of shape (n_samples,) + The opposite LOF of the training samples. The higher, the more normal. + Inliers tend to have a LOF score close to 1 + (``negative_outlier_factor_`` close to -1), while outliers tend to have + a larger LOF score. + + The local outlier factor (LOF) of a sample captures its + supposed 'degree of abnormality'. + It is the average of the ratio of the local reachability density of + a sample and those of its k-nearest neighbors. + + n_neighbors_ : int + The actual number of neighbors used for :meth:`kneighbors` queries. + + offset_ : float + Offset used to obtain binary labels from the raw scores. + Observations having a negative_outlier_factor smaller than `offset_` + are detected as abnormal. 
+ The offset is set to -1.5 (inliers score around -1), except when a + contamination parameter different than "auto" is provided. In that + case, the offset is defined in such a way we obtain the expected + number of outliers in training. + + .. versionadded:: 0.20 + + effective_metric_ : str + The effective metric used for the distance computation. + + effective_metric_params_ : dict + The effective additional keyword arguments for the metric function. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + It is the number of samples in the fitted data. + + See Also + -------- + sklearn.svm.OneClassSVM: Unsupervised Outlier Detection using + Support Vector Machine. + + References + ---------- + .. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May). + LOF: identifying density-based local outliers. In ACM sigmod record. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.neighbors import LocalOutlierFactor + >>> X = [[-1.1], [0.2], [101.1], [0.3]] + >>> clf = LocalOutlierFactor(n_neighbors=2) + >>> clf.fit_predict(X) + array([ 1, 1, -1, 1]) + >>> clf.negative_outlier_factor_ + array([ -0.9821..., -1.0370..., -73.3697..., -0.9821...]) + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "contamination": [ + StrOptions({"auto"}), + Interval(Real, 0, 0.5, closed="right"), + ], + "novelty": ["boolean"], + } + _parameter_constraints.pop("radius") + + def __init__( + self, + n_neighbors=20, + *, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + contamination="auto", + novelty=False, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.contamination = contamination + self.novelty = novelty + + def _check_novelty_fit_predict(self): + if self.novelty: + msg = ( + "fit_predict is not available when novelty=True. Use " + "novelty=False if you want to predict on the training set." + ) + raise AttributeError(msg) + return True + + @available_if(_check_novelty_fit_predict) + def fit_predict(self, X, y=None): + """Fit the model to the training set X and return the labels. + + **Not available for novelty detection (when novelty is set to True).** + Label is 1 for an inlier and -1 for an outlier according to the LOF + score and the contamination parameter. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None + The query sample or samples to compute the Local Outlier Factor + w.r.t. the training samples. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + Returns -1 for anomalies/outliers and 1 for inliers. + """ + + # As fit_predict would be different from fit.predict, fit_predict is + # only available for outlier detection (novelty=False) + + return self.fit(X)._predict() + + @_fit_context( + # LocalOutlierFactor.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the local outlier factor detector from the training dataset. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : LocalOutlierFactor + The fitted local outlier factor detector. + """ + self._fit(X) + + n_samples = self.n_samples_fit_ + if self.n_neighbors > n_samples: + warnings.warn( + "n_neighbors (%s) is greater than the " + "total number of samples (%s). n_neighbors " + "will be set to (n_samples - 1) for estimation." + % (self.n_neighbors, n_samples) + ) + self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1)) + + self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors( + n_neighbors=self.n_neighbors_ + ) + + if self._fit_X.dtype == np.float32: + self._distances_fit_X_ = self._distances_fit_X_.astype( + self._fit_X.dtype, + copy=False, + ) + + self._lrd = self._local_reachability_density( + self._distances_fit_X_, _neighbors_indices_fit_X_ + ) + + # Compute lof score over training samples to define offset_: + lrd_ratios_array = ( + self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis] + ) + + self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1) + + if self.contamination == "auto": + # inliers score around -1 (the higher, the less abnormal). + self.offset_ = -1.5 + else: + self.offset_ = np.percentile( + self.negative_outlier_factor_, 100.0 * self.contamination + ) + + return self + + def _check_novelty_predict(self): + if not self.novelty: + msg = ( + "predict is not available when novelty=False, use " + "fit_predict if you want to predict on training data. Use " + "novelty=True if you want to use LOF for novelty detection " + "and predict on new unseen data." + ) + raise AttributeError(msg) + return True + + @available_if(_check_novelty_predict) + def predict(self, X=None): + """Predict the labels (1 inlier, -1 outlier) of X according to LOF. + + **Only available for novelty detection (when novelty is set to True).** + This method allows to generalize prediction to *new observations* (not + in the training set). Note that the result of ``clf.fit(X)`` then + ``clf.predict(X)`` with ``novelty=True`` may differ from the result + obtained by ``clf.fit_predict(X)`` with ``novelty=False``. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The query sample or samples to compute the Local Outlier Factor + w.r.t. the training samples. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + Returns -1 for anomalies/outliers and +1 for inliers. + """ + return self._predict(X) + + def _predict(self, X=None): + """Predict the labels (1 inlier, -1 outlier) of X according to LOF. + + If X is None, returns the same as fit_predict(X_train). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None + The query sample or samples to compute the Local Outlier Factor + w.r.t. the training samples. If None, makes prediction on the + training data without considering them as their own neighbors. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + Returns -1 for anomalies/outliers and +1 for inliers. 
+ """ + check_is_fitted(self) + + if X is not None: + X = check_array(X, accept_sparse="csr") + is_inlier = np.ones(X.shape[0], dtype=int) + is_inlier[self.decision_function(X) < 0] = -1 + else: + is_inlier = np.ones(self.n_samples_fit_, dtype=int) + is_inlier[self.negative_outlier_factor_ < self.offset_] = -1 + + return is_inlier + + def _check_novelty_decision_function(self): + if not self.novelty: + msg = ( + "decision_function is not available when novelty=False. " + "Use novelty=True if you want to use LOF for novelty " + "detection and compute decision_function for new unseen " + "data. Note that the opposite LOF of the training samples " + "is always available by considering the " + "negative_outlier_factor_ attribute." + ) + raise AttributeError(msg) + return True + + @available_if(_check_novelty_decision_function) + def decision_function(self, X): + """Shifted opposite of the Local Outlier Factor of X. + + Bigger is better, i.e. large values correspond to inliers. + + **Only available for novelty detection (when novelty is set to True).** + The shift offset allows a zero threshold for being an outlier. + The argument X is supposed to contain *new data*: if X contains a + point from training, it considers the later in its own neighborhood. + Also, the samples in X are not considered in the neighborhood of any + point. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The query sample or samples to compute the Local Outlier Factor + w.r.t. the training samples. + + Returns + ------- + shifted_opposite_lof_scores : ndarray of shape (n_samples,) + The shifted opposite of the Local Outlier Factor of each input + samples. The lower, the more abnormal. Negative scores represent + outliers, positive scores represent inliers. + """ + return self.score_samples(X) - self.offset_ + + def _check_novelty_score_samples(self): + if not self.novelty: + msg = ( + "score_samples is not available when novelty=False. The " + "scores of the training samples are always available " + "through the negative_outlier_factor_ attribute. Use " + "novelty=True if you want to use LOF for novelty detection " + "and compute score_samples for new unseen data." + ) + raise AttributeError(msg) + return True + + @available_if(_check_novelty_score_samples) + def score_samples(self, X): + """Opposite of the Local Outlier Factor of X. + + It is the opposite as bigger is better, i.e. large values correspond + to inliers. + + **Only available for novelty detection (when novelty is set to True).** + The argument X is supposed to contain *new data*: if X contains a + point from training, it considers the later in its own neighborhood. + Also, the samples in X are not considered in the neighborhood of any + point. Because of this, the scores obtained via ``score_samples`` may + differ from the standard LOF scores. + The standard LOF scores for the training data is available via the + ``negative_outlier_factor_`` attribute. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The query sample or samples to compute the Local Outlier Factor + w.r.t. the training samples. + + Returns + ------- + opposite_lof_scores : ndarray of shape (n_samples,) + The opposite of the Local Outlier Factor of each input samples. + The lower, the more abnormal. 
+ """ + check_is_fitted(self) + X = check_array(X, accept_sparse="csr") + + distances_X, neighbors_indices_X = self.kneighbors( + X, n_neighbors=self.n_neighbors_ + ) + + if X.dtype == np.float32: + distances_X = distances_X.astype(X.dtype, copy=False) + + X_lrd = self._local_reachability_density( + distances_X, + neighbors_indices_X, + ) + + lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis] + + # as bigger is better: + return -np.mean(lrd_ratios_array, axis=1) + + def _local_reachability_density(self, distances_X, neighbors_indices): + """The local reachability density (LRD) + + The LRD of a sample is the inverse of the average reachability + distance of its k-nearest neighbors. + + Parameters + ---------- + distances_X : ndarray of shape (n_queries, self.n_neighbors) + Distances to the neighbors (in the training samples `self._fit_X`) + of each query point to compute the LRD. + + neighbors_indices : ndarray of shape (n_queries, self.n_neighbors) + Neighbors indices (of each query point) among training samples + self._fit_X. + + Returns + ------- + local_reachability_density : ndarray of shape (n_queries,) + The local reachability density of each sample. + """ + dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1] + reach_dist_array = np.maximum(distances_X, dist_k) + + # 1e-10 to avoid `nan' when nb of duplicates > n_neighbors_: + return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10) + + def _more_tags(self): + return { + "preserves_dtype": [np.float64, np.float32], + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nca.py new file mode 100644 index 0000000000000000000000000000000000000000..d302aef0dc0a286a10c9fbbd99a415cf5cf1ccc7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nca.py @@ -0,0 +1,525 @@ +""" +Neighborhood Component Analysis +""" + +# Authors: William de Vazelhes +# John Chiotellis +# License: BSD 3 clause + +import sys +import time +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from scipy.optimize import minimize + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..decomposition import PCA +from ..exceptions import ConvergenceWarning +from ..metrics import pairwise_distances +from ..preprocessing import LabelEncoder +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import softmax +from ..utils.multiclass import check_classification_targets +from ..utils.random import check_random_state +from ..utils.validation import check_array, check_is_fitted + + +class NeighborhoodComponentsAnalysis( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Neighborhood Components Analysis. + + Neighborhood Component Analysis (NCA) is a machine learning algorithm for + metric learning. It learns a linear transformation in a supervised fashion + to improve the classification accuracy of a stochastic nearest neighbors + rule in the transformed space. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Preferred dimensionality of the projected space. + If None it will be set to `n_features`. + + init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \ + (n_features_a, n_features_b), default='auto' + Initialization of the linear transformation. 
Possible options are + `'auto'`, `'pca'`, `'lda'`, `'identity'`, `'random'`, and a numpy + array of shape `(n_features_a, n_features_b)`. + + - `'auto'` + Depending on `n_components`, the most reasonable initialization + will be chosen. If `n_components <= n_classes` we use `'lda'`, as + it uses labels information. If not, but + `n_components < min(n_features, n_samples)`, we use `'pca'`, as + it projects data in meaningful directions (those of higher + variance). Otherwise, we just use `'identity'`. + + - `'pca'` + `n_components` principal components of the inputs passed + to :meth:`fit` will be used to initialize the transformation. + (See :class:`~sklearn.decomposition.PCA`) + + - `'lda'` + `min(n_components, n_classes)` most discriminative + components of the inputs passed to :meth:`fit` will be used to + initialize the transformation. (If `n_components > n_classes`, + the rest of the components will be zero.) (See + :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`) + + - `'identity'` + If `n_components` is strictly smaller than the + dimensionality of the inputs passed to :meth:`fit`, the identity + matrix will be truncated to the first `n_components` rows. + + - `'random'` + The initial transformation will be a random array of shape + `(n_components, n_features)`. Each value is sampled from the + standard normal distribution. + + - numpy array + `n_features_b` must match the dimensionality of the inputs passed + to :meth:`fit` and n_features_a must be less than or equal to that. + If `n_components` is not `None`, `n_features_a` must match it. + + warm_start : bool, default=False + If `True` and :meth:`fit` has been called before, the solution of the + previous call to :meth:`fit` is used as the initial linear + transformation (`n_components` and `init` will be ignored). + + max_iter : int, default=50 + Maximum number of iterations in the optimization. + + tol : float, default=1e-5 + Convergence tolerance for the optimization. + + callback : callable, default=None + If not `None`, this function is called after every iteration of the + optimizer, taking as arguments the current solution (flattened + transformation matrix) and the number of iterations. This might be + useful in case one wants to examine or store the transformation + found after each iteration. + + verbose : int, default=0 + If 0, no progress messages will be printed. + If 1, progress messages will be printed to stdout. + If > 1, progress messages will be printed and the `disp` + parameter of :func:`scipy.optimize.minimize` will be set to + `verbose - 2`. + + random_state : int or numpy.RandomState, default=None + A pseudo random number generator object or a seed for it if int. If + `init='random'`, `random_state` is used to initialize the random + transformation. If `init='pca'`, `random_state` is passed as an + argument to PCA when initializing the transformation. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The linear transformation learned during fitting. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_iter_ : int + Counts the number of iterations performed by the optimizer. + + random_state_ : numpy.RandomState + Pseudo random number generator object used during initialization. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.discriminant_analysis.LinearDiscriminantAnalysis : Linear + Discriminant Analysis. + sklearn.decomposition.PCA : Principal component analysis (PCA). + + References + ---------- + .. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov. + "Neighbourhood Components Analysis". Advances in Neural Information + Processing Systems. 17, 513-520, 2005. + http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf + + .. [2] Wikipedia entry on Neighborhood Components Analysis + https://en.wikipedia.org/wiki/Neighbourhood_components_analysis + + Examples + -------- + >>> from sklearn.neighbors import NeighborhoodComponentsAnalysis + >>> from sklearn.neighbors import KNeighborsClassifier + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import train_test_split + >>> X, y = load_iris(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + ... stratify=y, test_size=0.7, random_state=42) + >>> nca = NeighborhoodComponentsAnalysis(random_state=42) + >>> nca.fit(X_train, y_train) + NeighborhoodComponentsAnalysis(...) + >>> knn = KNeighborsClassifier(n_neighbors=3) + >>> knn.fit(X_train, y_train) + KNeighborsClassifier(...) + >>> print(knn.score(X_test, y_test)) + 0.933333... + >>> knn.fit(nca.transform(X_train), y_train) + KNeighborsClassifier(...) + >>> print(knn.score(nca.transform(X_test), y_test)) + 0.961904... + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "init": [ + StrOptions({"auto", "pca", "lda", "identity", "random"}), + np.ndarray, + ], + "warm_start": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "callback": [callable, None], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + init="auto", + warm_start=False, + max_iter=50, + tol=1e-5, + callback=None, + verbose=0, + random_state=None, + ): + self.n_components = n_components + self.init = init + self.warm_start = warm_start + self.max_iter = max_iter + self.tol = tol + self.callback = callback + self.verbose = verbose + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model according to the given training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training samples. + + y : array-like of shape (n_samples,) + The corresponding training labels. + + Returns + ------- + self : object + Fitted estimator. + """ + # Validate the inputs X and y, and converts y to numerical classes. + X, y = self._validate_data(X, y, ensure_min_samples=2) + check_classification_targets(y) + y = LabelEncoder().fit_transform(y) + + # Check the preferred dimensionality of the projected space + if self.n_components is not None and self.n_components > X.shape[1]: + raise ValueError( + "The preferred dimensionality of the " + f"projected space `n_components` ({self.n_components}) cannot " + "be greater than the given data " + f"dimensionality ({X.shape[1]})!" 
+ ) + # If warm_start is enabled, check that the inputs are consistent + if ( + self.warm_start + and hasattr(self, "components_") + and self.components_.shape[1] != X.shape[1] + ): + raise ValueError( + f"The new inputs dimensionality ({X.shape[1]}) does not " + "match the input dimensionality of the " + f"previously learned transformation ({self.components_.shape[1]})." + ) + # Check how the linear transformation should be initialized + init = self.init + if isinstance(init, np.ndarray): + init = check_array(init) + # Assert that init.shape[1] = X.shape[1] + if init.shape[1] != X.shape[1]: + raise ValueError( + f"The input dimensionality ({init.shape[1]}) of the given " + "linear transformation `init` must match the " + f"dimensionality of the given inputs `X` ({X.shape[1]})." + ) + # Assert that init.shape[0] <= init.shape[1] + if init.shape[0] > init.shape[1]: + raise ValueError( + f"The output dimensionality ({init.shape[0]}) of the given " + "linear transformation `init` cannot be " + f"greater than its input dimensionality ({init.shape[1]})." + ) + # Assert that self.n_components = init.shape[0] + if self.n_components is not None and self.n_components != init.shape[0]: + raise ValueError( + "The preferred dimensionality of the " + f"projected space `n_components` ({self.n_components}) does" + " not match the output dimensionality of " + "the given linear transformation " + f"`init` ({init.shape[0]})!" + ) + + # Initialize the random generator + self.random_state_ = check_random_state(self.random_state) + + # Measure the total training time + t_train = time.time() + + # Compute a mask that stays fixed during optimization: + same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] + # (n_samples, n_samples) + + # Initialize the transformation + transformation = np.ravel(self._initialize(X, y, init)) + + # Create a dictionary of parameters to be passed to the optimizer + disp = self.verbose - 2 if self.verbose > 1 else -1 + optimizer_params = { + "method": "L-BFGS-B", + "fun": self._loss_grad_lbfgs, + "args": (X, same_class_mask, -1.0), + "jac": True, + "x0": transformation, + "tol": self.tol, + "options": dict(maxiter=self.max_iter, disp=disp), + "callback": self._callback, + } + + # Call the optimizer + self.n_iter_ = 0 + opt_result = minimize(**optimizer_params) + + # Reshape the solution found by the optimizer + self.components_ = opt_result.x.reshape(-1, X.shape[1]) + self._n_features_out = self.components_.shape[1] + + # Stop timer + t_train = time.time() - t_train + if self.verbose: + cls_name = self.__class__.__name__ + + # Warn the user if the algorithm did not converge + if not opt_result.success: + warn( + "[{}] NCA did not converge: {}".format( + cls_name, opt_result.message + ), + ConvergenceWarning, + ) + + print("[{}] Training took {:8.2f}s.".format(cls_name, t_train)) + + return self + + def transform(self, X): + """Apply the learned transformation to the given data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data samples. + + Returns + ------- + X_embedded: ndarray of shape (n_samples, n_components) + The data samples transformed. + + Raises + ------ + NotFittedError + If :meth:`fit` has not been called before. + """ + + check_is_fitted(self) + X = self._validate_data(X, reset=False) + + return np.dot(X, self.components_.T) + + def _initialize(self, X, y, init): + """Initialize the transformation. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training samples. 
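Editor's note: the fit method above drives scipy.optimize.minimize with method='L-BFGS-B', the raveled transformation as x0, and a single callable returning both loss and gradient (jac=True). The sketch below shows only that calling pattern; the quadratic objective and the shapes are made up for illustration and are not the NCA loss.

# Sketch of the optimizer pattern used in fit: L-BFGS-B over a raveled matrix,
# with one function returning (loss, gradient).
import numpy as np
from scipy.optimize import minimize

n_components, n_features = 2, 4
x0 = np.ravel(np.eye(n_components, n_features))   # flattened initial transform

def loss_grad(flat_transform):
    T = flat_transform.reshape(n_components, n_features)
    loss = 0.5 * np.sum(T ** 2)          # toy objective, not the NCA loss
    grad = T                             # its gradient w.r.t. T
    return loss, grad.ravel()            # the optimizer expects a flat gradient

result = minimize(loss_grad, x0, method="L-BFGS-B", jac=True,
                  tol=1e-5, options={"maxiter": 50})
components = result.x.reshape(n_components, n_features)
print(result.success, components.shape)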
+ + y : array-like of shape (n_samples,) + The training labels. + + init : str or ndarray of shape (n_features_a, n_features_b) + The validated initialization of the linear transformation. + + Returns + ------- + transformation : ndarray of shape (n_components, n_features) + The initialized linear transformation. + + """ + + transformation = init + if self.warm_start and hasattr(self, "components_"): + transformation = self.components_ + elif isinstance(init, np.ndarray): + pass + else: + n_samples, n_features = X.shape + n_components = self.n_components or n_features + if init == "auto": + n_classes = len(np.unique(y)) + if n_components <= min(n_features, n_classes - 1): + init = "lda" + elif n_components < min(n_features, n_samples): + init = "pca" + else: + init = "identity" + if init == "identity": + transformation = np.eye(n_components, X.shape[1]) + elif init == "random": + transformation = self.random_state_.standard_normal( + size=(n_components, X.shape[1]) + ) + elif init in {"pca", "lda"}: + init_time = time.time() + if init == "pca": + pca = PCA( + n_components=n_components, random_state=self.random_state_ + ) + if self.verbose: + print("Finding principal components... ", end="") + sys.stdout.flush() + pca.fit(X) + transformation = pca.components_ + elif init == "lda": + from ..discriminant_analysis import LinearDiscriminantAnalysis + + lda = LinearDiscriminantAnalysis(n_components=n_components) + if self.verbose: + print("Finding most discriminative components... ", end="") + sys.stdout.flush() + lda.fit(X, y) + transformation = lda.scalings_.T[:n_components] + if self.verbose: + print("done in {:5.2f}s".format(time.time() - init_time)) + return transformation + + def _callback(self, transformation): + """Called after each iteration of the optimizer. + + Parameters + ---------- + transformation : ndarray of shape (n_components * n_features,) + The solution computed by the optimizer in this iteration. + """ + if self.callback is not None: + self.callback(transformation, self.n_iter_) + + self.n_iter_ += 1 + + def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0): + """Compute the loss and the loss gradient w.r.t. `transformation`. + + Parameters + ---------- + transformation : ndarray of shape (n_components * n_features,) + The raveled linear transformation on which to compute loss and + evaluate gradient. + + X : ndarray of shape (n_samples, n_features) + The training samples. + + same_class_mask : ndarray of shape (n_samples, n_samples) + A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong + to the same class, and `0` otherwise. + + Returns + ------- + loss : float + The loss computed for the given transformation. + + gradient : ndarray of shape (n_components * n_features,) + The new (flattened) gradient of the loss. 
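Editor's note: read as a standalone rule, the 'auto' branch of _initialize above selects the initialization as sketched below. The helper name and the example shapes are made up; the thresholds follow the code in _initialize.

# Sketch of the 'auto' initialization rule implemented in _initialize.
def choose_auto_init(n_samples, n_features, n_classes, n_components):
    if n_components <= min(n_features, n_classes - 1):
        return "lda"        # supervised init, uses the labels
    if n_components < min(n_features, n_samples):
        return "pca"        # unsupervised, keeps high-variance directions
    return "identity"       # fall back to a truncated identity matrix

print(choose_auto_init(n_samples=150, n_features=4, n_classes=3, n_components=2))  # 'lda'
print(choose_auto_init(n_samples=150, n_features=4, n_classes=3, n_components=3))  # 'pca'
print(choose_auto_init(n_samples=150, n_features=4, n_classes=3, n_components=4))  # 'identity'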
+ """ + + if self.n_iter_ == 0: + self.n_iter_ += 1 + if self.verbose: + header_fields = ["Iteration", "Objective Value", "Time(s)"] + header_fmt = "{:>10} {:>20} {:>10}" + header = header_fmt.format(*header_fields) + cls_name = self.__class__.__name__ + print("[{}]".format(cls_name)) + print( + "[{}] {}\n[{}] {}".format( + cls_name, header, cls_name, "-" * len(header) + ) + ) + + t_funcall = time.time() + + transformation = transformation.reshape(-1, X.shape[1]) + X_embedded = np.dot(X, transformation.T) # (n_samples, n_components) + + # Compute softmax distances + p_ij = pairwise_distances(X_embedded, squared=True) + np.fill_diagonal(p_ij, np.inf) + p_ij = softmax(-p_ij) # (n_samples, n_samples) + + # Compute loss + masked_p_ij = p_ij * same_class_mask + p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1) + loss = np.sum(p) + + # Compute gradient of loss w.r.t. `transform` + weighted_p_ij = masked_p_ij - p_ij * p + weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T + np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0)) + gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X) + # time complexity of the gradient: O(n_components x n_samples x ( + # n_samples + n_features)) + + if self.verbose: + t_funcall = time.time() - t_funcall + values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}" + print( + values_fmt.format( + self.__class__.__name__, self.n_iter_, loss, t_funcall + ) + ) + sys.stdout.flush() + + return sign * loss, sign * gradient.ravel() + + def _more_tags(self): + return {"requires_y": True} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py new file mode 100644 index 0000000000000000000000000000000000000000..75086ee25448e909b9dfe7dee6c1fe4c88de543b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py @@ -0,0 +1,261 @@ +""" +Nearest Centroid Classification +""" + +# Author: Robert Layton +# Olivier Grisel +# +# License: BSD 3 clause + +import warnings +from numbers import Real + +import numpy as np +from scipy import sparse as sp + +from sklearn.metrics.pairwise import _VALID_METRICS + +from ..base import BaseEstimator, ClassifierMixin, _fit_context +from ..metrics.pairwise import pairwise_distances_argmin +from ..preprocessing import LabelEncoder +from ..utils._param_validation import Interval, StrOptions +from ..utils.multiclass import check_classification_targets +from ..utils.sparsefuncs import csc_median_axis_0 +from ..utils.validation import check_is_fitted + + +class NearestCentroid(ClassifierMixin, BaseEstimator): + """Nearest centroid classifier. + + Each class is represented by its centroid, with test samples classified to + the class with the nearest centroid. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + metric : str or callable, default="euclidean" + Metric to use for distance computation. See the documentation of + `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. Note that "wminkowski", "seuclidean" and "mahalanobis" are not + supported. + + The centroids for the samples corresponding to each class is + the point from which the sum of the distances (according to the metric) + of all samples that belong to that particular class are minimized. 
+ If the `"manhattan"` metric is provided, this centroid is the median + and for all other metrics, the centroid is now set to be the mean. + + .. deprecated:: 1.3 + Support for metrics other than `euclidean` and `manhattan` and for + callables was deprecated in version 1.3 and will be removed in + version 1.5. + + .. versionchanged:: 0.19 + `metric='precomputed'` was deprecated and now raises an error + + shrink_threshold : float, default=None + Threshold for shrinking centroids to remove features. + + Attributes + ---------- + centroids_ : array-like of shape (n_classes, n_features) + Centroid of each class. + + classes_ : array of shape (n_classes,) + The unique classes labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + KNeighborsClassifier : Nearest neighbors classifier. + + Notes + ----- + When used for text classification with tf-idf vectors, this classifier is + also known as the Rocchio classifier. + + References + ---------- + Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of + multiple cancer types by shrunken centroids of gene expression. Proceedings + of the National Academy of Sciences of the United States of America, + 99(10), 6567-6572. The National Academy of Sciences. + + Examples + -------- + >>> from sklearn.neighbors import NearestCentroid + >>> import numpy as np + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> clf = NearestCentroid() + >>> clf.fit(X, y) + NearestCentroid() + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _valid_metrics = set(_VALID_METRICS) - {"mahalanobis", "seuclidean", "wminkowski"} + + _parameter_constraints: dict = { + "metric": [ + StrOptions( + _valid_metrics, deprecated=_valid_metrics - {"manhattan", "euclidean"} + ), + callable, + ], + "shrink_threshold": [Interval(Real, 0, None, closed="neither"), None], + } + + def __init__(self, metric="euclidean", *, shrink_threshold=None): + self.metric = metric + self.shrink_threshold = shrink_threshold + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """ + Fit the NearestCentroid model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + Note that centroid shrinking cannot be used with sparse matrices. + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + self : object + Fitted estimator. + """ + if isinstance(self.metric, str) and self.metric not in ( + "manhattan", + "euclidean", + ): + warnings.warn( + ( + "Support for distance metrics other than euclidean and " + "manhattan and for callables was deprecated in version " + "1.3 and will be removed in version 1.5." + ), + FutureWarning, + ) + + # If X is sparse and the metric is "manhattan", store it in a csc + # format is easier to calculate the median. 
+ if self.metric == "manhattan": + X, y = self._validate_data(X, y, accept_sparse=["csc"]) + else: + X, y = self._validate_data(X, y, accept_sparse=["csr", "csc"]) + is_X_sparse = sp.issparse(X) + if is_X_sparse and self.shrink_threshold: + raise ValueError("threshold shrinking not supported for sparse input") + check_classification_targets(y) + + n_samples, n_features = X.shape + le = LabelEncoder() + y_ind = le.fit_transform(y) + self.classes_ = classes = le.classes_ + n_classes = classes.size + if n_classes < 2: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % (n_classes) + ) + + # Mask mapping each class to its members. + self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64) + # Number of clusters in each class. + nk = np.zeros(n_classes) + + for cur_class in range(n_classes): + center_mask = y_ind == cur_class + nk[cur_class] = np.sum(center_mask) + if is_X_sparse: + center_mask = np.where(center_mask)[0] + + if self.metric == "manhattan": + # NumPy does not calculate median of sparse matrices. + if not is_X_sparse: + self.centroids_[cur_class] = np.median(X[center_mask], axis=0) + else: + self.centroids_[cur_class] = csc_median_axis_0(X[center_mask]) + else: + # TODO(1.5) remove warning when metric is only manhattan or euclidean + if self.metric != "euclidean": + warnings.warn( + "Averaging for metrics other than " + "euclidean and manhattan not supported. " + "The average is set to be the mean." + ) + self.centroids_[cur_class] = X[center_mask].mean(axis=0) + + if self.shrink_threshold: + if np.all(np.ptp(X, axis=0) == 0): + raise ValueError("All features have zero variance. Division by zero.") + dataset_centroid_ = np.mean(X, axis=0) + + # m parameter for determining deviation + m = np.sqrt((1.0 / nk) - (1.0 / n_samples)) + # Calculate deviation using the standard deviation of centroids. + variance = (X - self.centroids_[y_ind]) ** 2 + variance = variance.sum(axis=0) + s = np.sqrt(variance / (n_samples - n_classes)) + s += np.median(s) # To deter outliers from affecting the results. + mm = m.reshape(len(m), 1) # Reshape to allow broadcasting. + ms = mm * s + deviation = (self.centroids_ - dataset_centroid_) / ms + # Soft thresholding: if the deviation crosses 0 during shrinking, + # it becomes zero. + signs = np.sign(deviation) + deviation = np.abs(deviation) - self.shrink_threshold + np.clip(deviation, 0, None, out=deviation) + deviation *= signs + # Now adjust the centroids using the deviation + msd = ms * deviation + self.centroids_ = dataset_centroid_[np.newaxis, :] + msd + return self + + # TODO(1.5) remove note about precomputed metric + def predict(self, X): + """Perform classification on an array of test vectors `X`. + + The predicted class `C` for each sample in `X` is returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Test samples. + + Returns + ------- + C : ndarray of shape (n_samples,) + The predicted classes. + + Notes + ----- + If the metric constructor parameter is `"precomputed"`, `X` is assumed + to be the distance matrix between the data to be predicted and + `self.centroids_`. 
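Editor's note: the shrinkage step above is soft thresholding of the standardised deviation of each class centroid from the overall centroid: deviations that cross zero are clipped to zero, which effectively removes uninformative features. A small sketch of that step alone, with made-up deviations:

# Soft thresholding as used for shrunken centroids above.
import numpy as np

def soft_threshold(deviation, shrink_threshold):
    signs = np.sign(deviation)
    shrunk = np.abs(deviation) - shrink_threshold
    return signs * np.clip(shrunk, 0, None)

d = np.array([[0.05, -2.0, 0.8],
              [-0.10, 1.5, -0.3]])
print(soft_threshold(d, shrink_threshold=0.5))
# entries with |d| <= 0.5 become exactly 0; the rest move toward 0 by 0.5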
+ """ + check_is_fitted(self) + + X = self._validate_data(X, accept_sparse="csr", reset=False) + return self.classes_[ + pairwise_distances_argmin(X, self.centroids_, metric=self.metric) + ] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f165f2e20997b5f5d416826f23d5941178aaca03 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd new file mode 100644 index 0000000000000000000000000000000000000000..bd2160cc3b26f4eaf0821735aeb278fd3a16eb15 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd @@ -0,0 +1,10 @@ +from cython cimport floating +from ..utils._typedefs cimport float64_t, intp_t + +cdef int partition_node_indices( + const floating *data, + intp_t *node_indices, + intp_t split_dim, + intp_t split_index, + intp_t n_features, + intp_t n_points) except -1 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a607ac71046e32c08d9d7933c9fa5df681118153 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9ed033e747314ef4b2f7599c99da85be6dbce73e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd @@ -0,0 +1,92 @@ +# Author: Thomas Moreau +# Author: Olivier Grisel + +# See quad_tree.pyx for details. + +cimport numpy as cnp +from ..utils._typedefs cimport float32_t, intp_t + +# This is effectively an ifdef statement in Cython +# It allows us to write printf debugging lines +# and remove them at compile time +cdef enum: + DEBUGFLAG = 0 + +cdef float EPSILON = 1e-6 + +# XXX: Careful to not change the order of the arguments. It is important to +# have is_leaf and max_width consecutive as it permits to avoid padding by +# the compiler and keep the size coherent for both C and numpy data structures. +cdef struct Cell: + # Base storage structure for cells in a QuadTree object + + # Tree structure + intp_t parent # Parent cell of this cell + intp_t[8] children # Array pointing to children of this cell + + # Cell description + intp_t cell_id # Id of the cell in the cells array in the Tree + intp_t point_index # Index of the point at this cell (only defined + # # in non empty leaf) + bint is_leaf # Does this cell have children? + float32_t squared_max_width # Squared value of the maximum width w + intp_t depth # Depth of the cell in the tree + intp_t cumulative_size # Number of points included in the subtree with + # # this cell as a root. 
+ + # Internal constants + float32_t[3] center # Store the center for quick split of cells + float32_t[3] barycenter # Keep track of the center of mass of the cell + + # Cell boundaries + float32_t[3] min_bounds # Inferior boundaries of this cell (inclusive) + float32_t[3] max_bounds # Superior boundaries of this cell (exclusive) + + +cdef class _QuadTree: + # The QuadTree object is a quad tree structure constructed by inserting + # recursively points in the tree and splitting cells in 4 so that each + # leaf cell contains at most one point. + # This structure also handle 3D data, inserted in trees with 8 children + # for each node. + + # Parameters of the tree + cdef public int n_dimensions # Number of dimensions in X + cdef public int verbose # Verbosity of the output + cdef intp_t n_cells_per_cell # Number of children per node. (2 ** n_dimension) + + # Tree inner structure + cdef public intp_t max_depth # Max depth of the tree + cdef public intp_t cell_count # Counter for node IDs + cdef public intp_t capacity # Capacity of tree, in terms of nodes + cdef public intp_t n_points # Total number of points + cdef Cell* cells # Array of nodes + + # Point insertion methods + cdef int insert_point(self, float32_t[3] point, intp_t point_index, + intp_t cell_id=*) except -1 nogil + cdef intp_t _insert_point_in_new_child(self, float32_t[3] point, Cell* cell, + intp_t point_index, intp_t size=* + ) noexcept nogil + cdef intp_t _select_child(self, float32_t[3] point, Cell* cell) noexcept nogil + cdef bint _is_duplicate(self, float32_t[3] point1, float32_t[3] point2) noexcept nogil + + # Create a summary of the Tree compare to a query point + cdef long summarize(self, float32_t[3] point, float32_t* results, + float squared_theta=*, intp_t cell_id=*, long idx=* + ) noexcept nogil + + # Internal cell initialization methods + cdef void _init_cell(self, Cell* cell, intp_t parent, intp_t depth) noexcept nogil + cdef void _init_root(self, float32_t[3] min_bounds, float32_t[3] max_bounds + ) noexcept nogil + + # Private methods + cdef int _check_point_in_cell(self, float32_t[3] point, Cell* cell + ) except -1 nogil + + # Private array manipulation to manage the ``cells`` array + cdef int _resize(self, intp_t capacity) except -1 nogil + cdef int _resize_c(self, intp_t capacity=*) except -1 nogil + cdef int _get_cell(self, float32_t[3] point, intp_t cell_id=*) except -1 nogil + cdef Cell[:] _get_cell_ndarray(self) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_regression.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..2897c1ce409e8bb0733ff98df4dce11de5d3a256 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_regression.py @@ -0,0 +1,510 @@ +"""Nearest Neighbor Regression.""" + +# Authors: Jake Vanderplas +# Fabian Pedregosa +# Alexandre Gramfort +# Sparseness support by Lars Buitinck +# Multi-output support by Arnaud Joly +# Empty radius support by Andreas Bjerre-Nielsen +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam, +# University of Copenhagen + +import warnings + +import numpy as np + +from ..base import RegressorMixin, _fit_context +from ..metrics import DistanceMetric +from ..utils._param_validation import StrOptions +from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights + + +class KNeighborsRegressor(KNeighborsMixin, RegressorMixin, NeighborsBase): + """Regression based on k-nearest neighbors. 
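Editor's note: _select_child is only declared in the .pxd above, so the following is a guess at the general technique for a 2**n_dimensions-ary cell (4 children in 2D, 8 in 3D), not a transcription of the Cython implementation: compare the point to the cell centre along each axis and pack one bit per dimension into the child index.

# Illustrative child-index computation for a quad/oct tree cell.
def select_child(point, center):
    index = 0
    for d, (p, c) in enumerate(zip(point, center)):
        if p >= c:              # point lies in the "upper" half along axis d
            index |= 1 << d
    return index

print(select_child((0.2, 0.9), (0.5, 0.5)))            # 2 -> lower-x, upper-y
print(select_child((0.7, 0.9, 0.1), (0.5, 0.5, 0.5)))  # 3 -> upper-x, upper-y, lower-z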
+ + The target is predicted by local interpolation of the targets + associated of the nearest neighbors in the training set. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to use by default for :meth:`kneighbors` queries. + + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. + - [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + Uniform weights are used by default. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric : str, DistanceMetric object or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + If metric is a DistanceMetric object, it will be passed directly to + the underlying computation routines. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + Doesn't affect :meth:`fit` method. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric to use. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. 
For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + NearestNeighbors : Unsupervised learner for implementing neighbor searches. + RadiusNeighborsRegressor : Regression based on neighbors within a fixed radius. + KNeighborsClassifier : Classifier implementing the k-nearest neighbors vote. + RadiusNeighborsClassifier : Classifier implementing + a vote among neighbors within a given radius. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + .. warning:: + + Regarding the Nearest Neighbors algorithms, if it is found that two + neighbors, neighbor `k+1` and `k`, have identical distances but + different labels, the results will depend on the ordering of the + training data. + + https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import KNeighborsRegressor + >>> neigh = KNeighborsRegressor(n_neighbors=2) + >>> neigh.fit(X, y) + KNeighborsRegressor(...) + >>> print(neigh.predict([[1.5]])) + [0.5] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "weights": [StrOptions({"uniform", "distance"}), callable, None], + } + _parameter_constraints["metric"].append(DistanceMetric) + _parameter_constraints.pop("radius") + + def __init__( + self, + n_neighbors=5, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + metric_params=None, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + + def _more_tags(self): + # For cross-validation routines to split data correctly + return {"pairwise": self.metric == "precomputed"} + + @_fit_context( + # KNeighborsRegressor.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the k-nearest neighbors regressor from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : KNeighborsRegressor + The fitted k-nearest neighbors regressor. + """ + return self._fit(X, y) + + def predict(self, X): + """Predict the target for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int + Target values. + """ + if self.weights == "uniform": + # In that case, we do not need the distances to perform + # the weighting so we do not compute them. 
+ neigh_ind = self.kneighbors(X, return_distance=False) + neigh_dist = None + else: + neigh_dist, neigh_ind = self.kneighbors(X) + + weights = _get_weights(neigh_dist, self.weights) + + _y = self._y + if _y.ndim == 1: + _y = _y.reshape((-1, 1)) + + if weights is None: + y_pred = np.mean(_y[neigh_ind], axis=1) + else: + y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64) + denom = np.sum(weights, axis=1) + + for j in range(_y.shape[1]): + num = np.sum(_y[neigh_ind, j] * weights, axis=1) + y_pred[:, j] = num / denom + + if self._y.ndim == 1: + y_pred = y_pred.ravel() + + return y_pred + + +class RadiusNeighborsRegressor(RadiusNeighborsMixin, RegressorMixin, NeighborsBase): + """Regression based on neighbors within a fixed radius. + + The target is predicted by local interpolation of the targets + associated of the nearest neighbors in the training set. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + radius : float, default=1.0 + Range of parameter space to use by default for :meth:`radius_neighbors` + queries. + + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. + - [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + Uniform weights are used by default. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. 
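Editor's note: the predict method above supports two weighting schemes. For a single query the difference is a plain mean of the neighbour targets versus an inverse-distance weighted mean (which is essentially what weights='distance' computes, exact-zero distances aside). The numbers below are toy values.

# Uniform vs. distance-weighted k-NN prediction for one query point.
import numpy as np

neigh_dist = np.array([0.5, 1.0, 2.0])   # distances to the 3 nearest neighbours
neigh_y = np.array([10.0, 20.0, 40.0])   # their target values

uniform_pred = neigh_y.mean()
weights = 1.0 / neigh_dist               # inverse-distance weights
distance_pred = np.sum(neigh_y * weights) / np.sum(weights)

print(uniform_pred)    # 23.33... : all neighbours count equally
print(distance_pred)   # 17.14... : the closest neighbour dominates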
+ + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric to use. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + NearestNeighbors : Unsupervised learner for implementing neighbor searches. + KNeighborsRegressor : Regression based on k-nearest neighbors. + KNeighborsClassifier : Classifier based on the k-nearest neighbors. + RadiusNeighborsClassifier : Classifier based on neighbors within a given radius. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import RadiusNeighborsRegressor + >>> neigh = RadiusNeighborsRegressor(radius=1.0) + >>> neigh.fit(X, y) + RadiusNeighborsRegressor(...) + >>> print(neigh.predict([[1.5]])) + [0.5] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "weights": [StrOptions({"uniform", "distance"}), callable, None], + } + _parameter_constraints.pop("n_neighbors") + + def __init__( + self, + radius=1.0, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + metric_params=None, + n_jobs=None, + ): + super().__init__( + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + p=p, + metric=metric, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + + @_fit_context( + # RadiusNeighborsRegressor.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the radius neighbors regressor from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : RadiusNeighborsRegressor + The fitted radius neighbors regressor. + """ + return self._fit(X, y) + + def predict(self, X): + """Predict the target for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs), \ + dtype=double + Target values. 
+ """ + neigh_dist, neigh_ind = self.radius_neighbors(X) + + weights = _get_weights(neigh_dist, self.weights) + + _y = self._y + if _y.ndim == 1: + _y = _y.reshape((-1, 1)) + + empty_obs = np.full_like(_y[0], np.nan) + + if weights is None: + y_pred = np.array( + [ + np.mean(_y[ind, :], axis=0) if len(ind) else empty_obs + for (i, ind) in enumerate(neigh_ind) + ] + ) + + else: + y_pred = np.array( + [ + ( + np.average(_y[ind, :], axis=0, weights=weights[i]) + if len(ind) + else empty_obs + ) + for (i, ind) in enumerate(neigh_ind) + ] + ) + + if np.any(np.isnan(y_pred)): + empty_warning_msg = ( + "One or more samples have no neighbors " + "within specified radius; predicting NaN." + ) + warnings.warn(empty_warning_msg) + + if self._y.ndim == 1: + y_pred = y_pred.ravel() + + return y_pred diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py new file mode 100644 index 0000000000000000000000000000000000000000..a4ff66786340acc7379c135f8c51c27e41142744 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py @@ -0,0 +1,175 @@ +"""Unsupervised nearest neighbors learner""" +from ..base import _fit_context +from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin + + +class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase): + """Unsupervised learner for implementing neighbor searches. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to use by default for :meth:`kneighbors` queries. + + radius : float, default=1.0 + Range of parameter space to use by default for :meth:`radius_neighbors` + queries. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + p : float (positive), default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. 
When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + effective_metric_ : str + Metric used to compute distances to neighbors. + + effective_metric_params_ : dict + Parameters for the metric used to compute distances to neighbors. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + KNeighborsClassifier : Classifier implementing the k-nearest neighbors + vote. + RadiusNeighborsClassifier : Classifier implementing a vote among neighbors + within a given radius. + KNeighborsRegressor : Regression based on k-nearest neighbors. + RadiusNeighborsRegressor : Regression based on neighbors within a fixed + radius. + BallTree : Space partitioning data structure for organizing points in a + multi-dimensional space, used for nearest neighbor search. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm + + Examples + -------- + >>> import numpy as np + >>> from sklearn.neighbors import NearestNeighbors + >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]] + >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4) + >>> neigh.fit(samples) + NearestNeighbors(...) + >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False) + array([[2, 0]]...) + >>> nbrs = neigh.radius_neighbors( + ... [[0, 0, 1.3]], 0.4, return_distance=False + ... ) + >>> np.asarray(nbrs[0][0]) + array(2) + """ + + def __init__( + self, + *, + n_neighbors=5, + radius=1.0, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + + @_fit_context( + # NearestNeighbors.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the nearest neighbors estimator from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : NearestNeighbors + The fitted nearest neighbors estimator. 
+ """ + return self._fit(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..819d2dd019343ed7d40f9dfa417271d8a6e6884a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33fa6d4b07781381f716fe6fed52b210c33766d8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d635007fe05426a3cdfcdbed815e5bb08a956595 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a075101378952f78e7874e76e68b36bef2cb6d77 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4613a9b01b7835ffe60e7376aebf54da7460c2d8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1328124ada5c7144e1b7aca737e6403cab63acaf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20523925ac411570aa2d2c6b1c0da6b2aa86c290 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3766e406e008a7ef970326e745f31d2816c8453c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_pipeline.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d71d809036f03e2a52e74a7073833f004794b48 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_neighbors_tree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55cd9590fd03ac4ba13734ba22a70c03145ec7e2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_quad_tree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..5263f201f320b17ced98fb223e7aaaf624d9271d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_ball_tree.py @@ -0,0 +1,200 @@ +import itertools + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_equal + +from sklearn.neighbors._ball_tree import BallTree, BallTree32, BallTree64 +from sklearn.utils import check_random_state +from sklearn.utils._testing import _convert_container +from sklearn.utils.validation import check_array + +rng = np.random.RandomState(10) +V_mahalanobis = rng.rand(3, 3) +V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T) + +DIMENSION = 3 + +METRICS = { + "euclidean": {}, + "manhattan": {}, + "minkowski": dict(p=3), + "chebyshev": {}, +} + +DISCRETE_METRICS = ["hamming", "canberra", "braycurtis"] + +BOOLEAN_METRICS = [ + "jaccard", + "dice", + "rogerstanimoto", + "russellrao", + "sokalmichener", + "sokalsneath", +] + +BALL_TREE_CLASSES = [ + BallTree64, + BallTree32, +] + + +def brute_force_neighbors(X, Y, k, metric, **kwargs): + from sklearn.metrics import DistanceMetric + + X, Y = check_array(X), check_array(Y) + D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X) + ind = np.argsort(D, axis=1)[:, :k] + dist = D[np.arange(Y.shape[0])[:, None], ind] + return dist, ind + + +def test_BallTree_is_BallTree64_subclass(): + assert issubclass(BallTree, BallTree64) + + +@pytest.mark.parametrize("metric", itertools.chain(BOOLEAN_METRICS, DISCRETE_METRICS)) +@pytest.mark.parametrize("array_type", ["list", "array"]) +@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES) +def test_ball_tree_query_metrics(metric, array_type, 
BallTreeImplementation): + rng = check_random_state(0) + if metric in BOOLEAN_METRICS: + X = rng.random_sample((40, 10)).round(0) + Y = rng.random_sample((10, 10)).round(0) + elif metric in DISCRETE_METRICS: + X = (4 * rng.random_sample((40, 10))).round(0) + Y = (4 * rng.random_sample((10, 10))).round(0) + X = _convert_container(X, array_type) + Y = _convert_container(Y, array_type) + + k = 5 + + bt = BallTreeImplementation(X, leaf_size=1, metric=metric) + dist1, ind1 = bt.query(Y, k) + dist2, ind2 = brute_force_neighbors(X, Y, k, metric) + assert_array_almost_equal(dist1, dist2) + + +@pytest.mark.parametrize( + "BallTreeImplementation, decimal_tol", zip(BALL_TREE_CLASSES, [6, 5]) +) +def test_query_haversine(BallTreeImplementation, decimal_tol): + rng = check_random_state(0) + X = 2 * np.pi * rng.random_sample((40, 2)) + bt = BallTreeImplementation(X, leaf_size=1, metric="haversine") + dist1, ind1 = bt.query(X, k=5) + dist2, ind2 = brute_force_neighbors(X, X, k=5, metric="haversine") + + assert_array_almost_equal(dist1, dist2, decimal=decimal_tol) + assert_array_almost_equal(ind1, ind2) + + +@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES) +def test_array_object_type(BallTreeImplementation): + """Check that we do not accept object dtype array.""" + X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object) + with pytest.raises(ValueError, match="setting an array element with a sequence"): + BallTreeImplementation(X) + + +@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES) +def test_bad_pyfunc_metric(BallTreeImplementation): + def wrong_returned_value(x, y): + return "1" + + def one_arg_func(x): + return 1.0 # pragma: no cover + + X = np.ones((5, 2)) + msg = "Custom distance function must accept two vectors and return a float." + with pytest.raises(TypeError, match=msg): + BallTreeImplementation(X, metric=wrong_returned_value) + + msg = "takes 1 positional argument but 2 were given" + with pytest.raises(TypeError, match=msg): + BallTreeImplementation(X, metric=one_arg_func) + + +@pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS)) +def test_ball_tree_numerical_consistency(global_random_seed, metric): + # Results on float64 and float32 versions of a dataset must be + # numerically close. 
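+    # The helper below returns float64 and float32 copies of the *same* random
+    # data, so any discrepancy between the two trees can only come from the
+    # reduced floating-point precision, not from different inputs.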
+ X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree( + random_seed=global_random_seed, features=50 + ) + + metric_params = METRICS.get(metric, {}) + bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params) + bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params) + + # Test consistency with respect to the `query` method + k = 5 + dist_64, ind_64 = bt_64.query(Y_64, k=k) + dist_32, ind_32 = bt_32.query(Y_32, k=k) + assert_allclose(dist_64, dist_32, rtol=1e-5) + assert_equal(ind_64, ind_32) + assert dist_64.dtype == np.float64 + assert dist_32.dtype == np.float32 + + # Test consistency with respect to the `query_radius` method + r = 2.38 + ind_64 = bt_64.query_radius(Y_64, r=r) + ind_32 = bt_32.query_radius(Y_32, r=r) + for _ind64, _ind32 in zip(ind_64, ind_32): + assert_equal(_ind64, _ind32) + + # Test consistency with respect to the `query_radius` method + # with return distances being true + ind_64, dist_64 = bt_64.query_radius(Y_64, r=r, return_distance=True) + ind_32, dist_32 = bt_32.query_radius(Y_32, r=r, return_distance=True) + for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32): + assert_equal(_ind64, _ind32) + assert_allclose(_dist_64, _dist_32, rtol=1e-5) + assert _dist_64.dtype == np.float64 + assert _dist_32.dtype == np.float32 + + +@pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS)) +def test_kernel_density_numerical_consistency(global_random_seed, metric): + # Test consistency with respect to the `kernel_density` method + X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed) + + metric_params = METRICS.get(metric, {}) + bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params) + bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params) + + kernel = "gaussian" + h = 0.1 + density64 = bt_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True) + density32 = bt_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True) + assert_allclose(density64, density32, rtol=1e-5) + assert density64.dtype == np.float64 + assert density32.dtype == np.float32 + + +def test_two_point_correlation_numerical_consistency(global_random_seed): + # Test consistency with respect to the `two_point_correlation` method + X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed) + + bt_64 = BallTree64(X_64, leaf_size=10) + bt_32 = BallTree32(X_32, leaf_size=10) + + r = np.linspace(0, 1, 10) + + counts_64 = bt_64.two_point_correlation(Y_64, r=r, dualtree=True) + counts_32 = bt_32.two_point_correlation(Y_32, r=r, dualtree=True) + assert_allclose(counts_64, counts_32) + + +def get_dataset_for_binary_tree(random_seed, features=3): + rng = np.random.RandomState(random_seed) + _X = rng.rand(100, features) + _Y = rng.rand(5, features) + + X_64 = _X.astype(dtype=np.float64, copy=False) + Y_64 = _Y.astype(dtype=np.float64, copy=False) + + X_32 = _X.astype(dtype=np.float32, copy=False) + Y_32 = _Y.astype(dtype=np.float32, copy=False) + + return X_64, X_32, Y_64, Y_32 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..fb593485d17a8155f784ef881b3868338348e1a8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_graph.py @@ -0,0 +1,101 @@ +import numpy as np +import pytest + +from sklearn.metrics import 
euclidean_distances +from sklearn.neighbors import KNeighborsTransformer, RadiusNeighborsTransformer +from sklearn.neighbors._base import _is_sorted_by_data +from sklearn.utils._testing import assert_array_equal + + +def test_transformer_result(): + # Test the number of neighbors returned + n_neighbors = 5 + n_samples_fit = 20 + n_queries = 18 + n_features = 10 + + rng = np.random.RandomState(42) + X = rng.randn(n_samples_fit, n_features) + X2 = rng.randn(n_queries, n_features) + radius = np.percentile(euclidean_distances(X), 10) + + # with n_neighbors + for mode in ["distance", "connectivity"]: + add_one = mode == "distance" + nnt = KNeighborsTransformer(n_neighbors=n_neighbors, mode=mode) + Xt = nnt.fit_transform(X) + assert Xt.shape == (n_samples_fit, n_samples_fit) + assert Xt.data.shape == (n_samples_fit * (n_neighbors + add_one),) + assert Xt.format == "csr" + assert _is_sorted_by_data(Xt) + + X2t = nnt.transform(X2) + assert X2t.shape == (n_queries, n_samples_fit) + assert X2t.data.shape == (n_queries * (n_neighbors + add_one),) + assert X2t.format == "csr" + assert _is_sorted_by_data(X2t) + + # with radius + for mode in ["distance", "connectivity"]: + add_one = mode == "distance" + nnt = RadiusNeighborsTransformer(radius=radius, mode=mode) + Xt = nnt.fit_transform(X) + assert Xt.shape == (n_samples_fit, n_samples_fit) + assert not Xt.data.shape == (n_samples_fit * (n_neighbors + add_one),) + assert Xt.format == "csr" + assert _is_sorted_by_data(Xt) + + X2t = nnt.transform(X2) + assert X2t.shape == (n_queries, n_samples_fit) + assert not X2t.data.shape == (n_queries * (n_neighbors + add_one),) + assert X2t.format == "csr" + assert _is_sorted_by_data(X2t) + + +def _has_explicit_diagonal(X): + """Return True if the diagonal is explicitly stored""" + X = X.tocoo() + explicit = X.row[X.row == X.col] + return len(explicit) == X.shape[0] + + +def test_explicit_diagonal(): + # Test that the diagonal is explicitly stored in the sparse graph + n_neighbors = 5 + n_samples_fit, n_samples_transform, n_features = 20, 18, 10 + rng = np.random.RandomState(42) + X = rng.randn(n_samples_fit, n_features) + X2 = rng.randn(n_samples_transform, n_features) + + nnt = KNeighborsTransformer(n_neighbors=n_neighbors) + Xt = nnt.fit_transform(X) + assert _has_explicit_diagonal(Xt) + assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0) + + Xt = nnt.transform(X) + assert _has_explicit_diagonal(Xt) + assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0) + + # Using transform on new data should not always have zero diagonal + X2t = nnt.transform(X2) + assert not _has_explicit_diagonal(X2t) + + +@pytest.mark.parametrize("Klass", [KNeighborsTransformer, RadiusNeighborsTransformer]) +def test_graph_feature_names_out(Klass): + """Check `get_feature_names_out` for transformers defined in `_graph.py`.""" + + n_samples_fit = 20 + n_features = 10 + rng = np.random.RandomState(42) + X = rng.randn(n_samples_fit, n_features) + + est = Klass().fit(X) + names_out = est.get_feature_names_out() + + class_name_lower = Klass.__name__.lower() + expected_names_out = np.array( + [f"{class_name_lower}{i}" for i in range(est.n_samples_fit_)], + dtype=object, + ) + assert_array_equal(names_out, expected_names_out) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..749601baaf66fdbf96e8396ca1df45c5bdab4a1e 
--- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kd_tree.py @@ -0,0 +1,100 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_equal + +from sklearn.neighbors._kd_tree import KDTree, KDTree32, KDTree64 +from sklearn.neighbors.tests.test_ball_tree import get_dataset_for_binary_tree +from sklearn.utils.parallel import Parallel, delayed + +DIMENSION = 3 + +METRICS = {"euclidean": {}, "manhattan": {}, "chebyshev": {}, "minkowski": dict(p=3)} + +KD_TREE_CLASSES = [ + KDTree64, + KDTree32, +] + + +def test_KDTree_is_KDTree64_subclass(): + assert issubclass(KDTree, KDTree64) + + +@pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES) +def test_array_object_type(BinarySearchTree): + """Check that we do not accept object dtype array.""" + X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object) + with pytest.raises(ValueError, match="setting an array element with a sequence"): + BinarySearchTree(X) + + +@pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES) +def test_kdtree_picklable_with_joblib(BinarySearchTree): + """Make sure that KDTree queries work when joblib memmaps. + + Non-regression test for #21685 and #21228.""" + rng = np.random.RandomState(0) + X = rng.random_sample((10, 3)) + tree = BinarySearchTree(X, leaf_size=2) + + # Call Parallel with max_nbytes=1 to trigger readonly memory mapping that + # use to raise "ValueError: buffer source array is read-only" in a previous + # version of the Cython code. + Parallel(n_jobs=2, max_nbytes=1)(delayed(tree.query)(data) for data in 2 * [X]) + + +@pytest.mark.parametrize("metric", METRICS) +def test_kd_tree_numerical_consistency(global_random_seed, metric): + # Results on float64 and float32 versions of a dataset must be + # numerically close. 
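+    # As in the BallTree test, get_dataset_for_binary_tree yields matching
+    # float64/float32 copies of one random dataset, so the comparison isolates
+    # the effect of the dtype alone.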
+ X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree( + random_seed=global_random_seed, features=50 + ) + + metric_params = METRICS.get(metric, {}) + kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params) + kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params) + + # Test consistency with respect to the `query` method + k = 4 + dist_64, ind_64 = kd_64.query(Y_64, k=k) + dist_32, ind_32 = kd_32.query(Y_32, k=k) + assert_allclose(dist_64, dist_32, rtol=1e-5) + assert_equal(ind_64, ind_32) + assert dist_64.dtype == np.float64 + assert dist_32.dtype == np.float32 + + # Test consistency with respect to the `query_radius` method + r = 2.38 + ind_64 = kd_64.query_radius(Y_64, r=r) + ind_32 = kd_32.query_radius(Y_32, r=r) + for _ind64, _ind32 in zip(ind_64, ind_32): + assert_equal(_ind64, _ind32) + + # Test consistency with respect to the `query_radius` method + # with return distances being true + ind_64, dist_64 = kd_64.query_radius(Y_64, r=r, return_distance=True) + ind_32, dist_32 = kd_32.query_radius(Y_32, r=r, return_distance=True) + for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32): + assert_equal(_ind64, _ind32) + assert_allclose(_dist_64, _dist_32, rtol=1e-5) + assert _dist_64.dtype == np.float64 + assert _dist_32.dtype == np.float32 + + +@pytest.mark.parametrize("metric", METRICS) +def test_kernel_density_numerical_consistency(global_random_seed, metric): + # Test consistency with respect to the `kernel_density` method + X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed) + + metric_params = METRICS.get(metric, {}) + kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params) + kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params) + + kernel = "gaussian" + h = 0.1 + density64 = kd_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True) + density32 = kd_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True) + assert_allclose(density64, density32, rtol=1e-5) + assert density64.dtype == np.float64 + assert density32.dtype == np.float32 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kde.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kde.py new file mode 100644 index 0000000000000000000000000000000000000000..b6bf09d01b672b7ad5a3abf3506443b0ac620915 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_kde.py @@ -0,0 +1,252 @@ +import joblib +import numpy as np +import pytest + +from sklearn.datasets import make_blobs +from sklearn.exceptions import NotFittedError +from sklearn.model_selection import GridSearchCV +from sklearn.neighbors import KDTree, KernelDensity, NearestNeighbors +from sklearn.neighbors._ball_tree import kernel_norm +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils._testing import assert_allclose + + +# XXX Duplicated in test_neighbors_tree, test_kde +def compute_kernel_slow(Y, X, kernel, h): + if h == "scott": + h = X.shape[0] ** (-1 / (X.shape[1] + 4)) + elif h == "silverman": + h = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4)) + + d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) + norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0] + + if kernel == "gaussian": + return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1) + elif kernel == "tophat": + return norm * (d < h).sum(-1) + elif kernel == "epanechnikov": + return norm * ((1.0 - (d * d) / (h * h)) * (d < 
h)).sum(-1) + elif kernel == "exponential": + return norm * (np.exp(-d / h)).sum(-1) + elif kernel == "linear": + return norm * ((1 - d / h) * (d < h)).sum(-1) + elif kernel == "cosine": + return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1) + else: + raise ValueError("kernel not recognized") + + +def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true): + kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol) + log_dens = kde.fit(X).score_samples(Y) + assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=max(1e-7, rtol)) + assert_allclose( + np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=max(1e-7, rtol) + ) + + +@pytest.mark.parametrize( + "kernel", ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"] +) +@pytest.mark.parametrize("bandwidth", [0.01, 0.1, 1, "scott", "silverman"]) +def test_kernel_density(kernel, bandwidth): + n_samples, n_features = (100, 3) + + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + Y = rng.randn(n_samples, n_features) + + dens_true = compute_kernel_slow(Y, X, kernel, bandwidth) + + for rtol in [0, 1e-5]: + for atol in [1e-6, 1e-2]: + for breadth_first in (True, False): + check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true) + + +def test_kernel_density_sampling(n_samples=100, n_features=3): + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + + bandwidth = 0.2 + + for kernel in ["gaussian", "tophat"]: + # draw a tophat sample + kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X) + samp = kde.sample(100) + assert X.shape == samp.shape + + # check that samples are in the right range + nbrs = NearestNeighbors(n_neighbors=1).fit(X) + dist, ind = nbrs.kneighbors(X, return_distance=True) + + if kernel == "tophat": + assert np.all(dist < bandwidth) + elif kernel == "gaussian": + # 5 standard deviations is safe for 100 samples, but there's a + # very small chance this test could fail. + assert np.all(dist < 5 * bandwidth) + + # check unsupported kernels + for kernel in ["epanechnikov", "exponential", "linear", "cosine"]: + kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X) + with pytest.raises(NotImplementedError): + kde.sample(100) + + # non-regression test: used to return a scalar + X = rng.randn(4, 1) + kde = KernelDensity(kernel="gaussian").fit(X) + assert kde.sample().shape == (1, 1) + + +@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree"]) +@pytest.mark.parametrize( + "metric", ["euclidean", "minkowski", "manhattan", "chebyshev", "haversine"] +) +def test_kde_algorithm_metric_choice(algorithm, metric): + # Smoke test for various metrics and algorithms + rng = np.random.RandomState(0) + X = rng.randn(10, 2) # 2 features required for haversine dist. 
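+    # (the haversine metric is only defined for 2-D (latitude, longitude) inputs)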
+ Y = rng.randn(10, 2) + + kde = KernelDensity(algorithm=algorithm, metric=metric) + + if algorithm == "kd_tree" and metric not in KDTree.valid_metrics: + with pytest.raises(ValueError, match="invalid metric"): + kde.fit(X) + else: + kde.fit(X) + y_dens = kde.score_samples(Y) + assert y_dens.shape == Y.shape[:1] + + +def test_kde_score(n_samples=100, n_features=3): + pass + # FIXME + # rng = np.random.RandomState(0) + # X = rng.random_sample((n_samples, n_features)) + # Y = rng.random_sample((n_samples, n_features)) + + +def test_kde_sample_weights_error(): + kde = KernelDensity() + with pytest.raises(ValueError): + kde.fit(np.random.random((200, 10)), sample_weight=np.random.random((200, 10))) + with pytest.raises(ValueError): + kde.fit(np.random.random((200, 10)), sample_weight=-np.random.random(200)) + + +def test_kde_pipeline_gridsearch(): + # test that kde plays nice in pipelines and grid-searches + X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) + pipe1 = make_pipeline( + StandardScaler(with_mean=False, with_std=False), + KernelDensity(kernel="gaussian"), + ) + params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10]) + search = GridSearchCV(pipe1, param_grid=params) + search.fit(X) + assert search.best_params_["kerneldensity__bandwidth"] == 0.1 + + +def test_kde_sample_weights(): + n_samples = 400 + size_test = 20 + weights_neutral = np.full(n_samples, 3.0) + for d in [1, 2, 10]: + rng = np.random.RandomState(0) + X = rng.rand(n_samples, d) + weights = 1 + (10 * X.sum(axis=1)).astype(np.int8) + X_repetitions = np.repeat(X, weights, axis=0) + n_samples_test = size_test // d + test_points = rng.rand(n_samples_test, d) + for algorithm in ["auto", "ball_tree", "kd_tree"]: + for metric in ["euclidean", "minkowski", "manhattan", "chebyshev"]: + if algorithm != "kd_tree" or metric in KDTree.valid_metrics: + kde = KernelDensity(algorithm=algorithm, metric=metric) + + # Test that adding a constant sample weight has no effect + kde.fit(X, sample_weight=weights_neutral) + scores_const_weight = kde.score_samples(test_points) + sample_const_weight = kde.sample(random_state=1234) + kde.fit(X) + scores_no_weight = kde.score_samples(test_points) + sample_no_weight = kde.sample(random_state=1234) + assert_allclose(scores_const_weight, scores_no_weight) + assert_allclose(sample_const_weight, sample_no_weight) + + # Test equivalence between sampling and (integer) weights + kde.fit(X, sample_weight=weights) + scores_weight = kde.score_samples(test_points) + sample_weight = kde.sample(random_state=1234) + kde.fit(X_repetitions) + scores_ref_sampling = kde.score_samples(test_points) + sample_ref_sampling = kde.sample(random_state=1234) + assert_allclose(scores_weight, scores_ref_sampling) + assert_allclose(sample_weight, sample_ref_sampling) + + # Test that sample weights has a non-trivial effect + diff = np.max(np.abs(scores_no_weight - scores_weight)) + assert diff > 0.001 + + # Test invariance with respect to arbitrary scaling + scale_factor = rng.rand() + kde.fit(X, sample_weight=(scale_factor * weights)) + scores_scaled_weight = kde.score_samples(test_points) + assert_allclose(scores_scaled_weight, scores_weight) + + +@pytest.mark.parametrize("sample_weight", [None, [0.1, 0.2, 0.3]]) +def test_pickling(tmpdir, sample_weight): + # Make sure that predictions are the same before and after pickling. Used + # to be a bug because sample_weights wasn't pickled and the resulting tree + # would miss some info. 
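+    # The estimator is fitted, scored, dumped and reloaded with joblib; the
+    # reloaded model must reproduce the same score_samples output.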
+ + kde = KernelDensity() + data = np.reshape([1.0, 2.0, 3.0], (-1, 1)) + kde.fit(data, sample_weight=sample_weight) + + X = np.reshape([1.1, 2.1], (-1, 1)) + scores = kde.score_samples(X) + + file_path = str(tmpdir.join("dump.pkl")) + joblib.dump(kde, file_path) + kde = joblib.load(file_path) + scores_pickled = kde.score_samples(X) + + assert_allclose(scores, scores_pickled) + + +@pytest.mark.parametrize("method", ["score_samples", "sample"]) +def test_check_is_fitted(method): + # Check that predict raises an exception in an unfitted estimator. + # Unfitted estimators should raise a NotFittedError. + rng = np.random.RandomState(0) + X = rng.randn(10, 2) + kde = KernelDensity() + + with pytest.raises(NotFittedError): + getattr(kde, method)(X) + + +@pytest.mark.parametrize("bandwidth", ["scott", "silverman", 0.1]) +def test_bandwidth(bandwidth): + n_samples, n_features = (100, 3) + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) + kde = KernelDensity(bandwidth=bandwidth).fit(X) + samp = kde.sample(100) + kde_sc = kde.score_samples(X) + assert X.shape == samp.shape + assert kde_sc.shape == (n_samples,) + + # Test that the attribute self.bandwidth_ has the expected value + if bandwidth == "scott": + h = X.shape[0] ** (-1 / (X.shape[1] + 4)) + elif bandwidth == "silverman": + h = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4)) + else: + h = bandwidth + assert kde.bandwidth_ == pytest.approx(h) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_lof.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_lof.py new file mode 100644 index 0000000000000000000000000000000000000000..3f5c1e161b7e88012b3d3334e0dd621797416248 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_lof.py @@ -0,0 +1,361 @@ +# Authors: Nicolas Goix +# Alexandre Gramfort +# License: BSD 3 clause + +import re +from math import sqrt + +import numpy as np +import pytest + +from sklearn import metrics, neighbors +from sklearn.datasets import load_iris +from sklearn.metrics import roc_auc_score +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.estimator_checks import ( + check_outlier_corruption, + parametrize_with_checks, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +# load the iris dataset +# and randomly permute it +rng = check_random_state(0) +iris = load_iris() +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + + +def test_lof(global_dtype): + # Toy sample (the last two samples are outliers): + X = np.asarray( + [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]], + dtype=global_dtype, + ) + + # Test LocalOutlierFactor: + clf = neighbors.LocalOutlierFactor(n_neighbors=5) + score = clf.fit(X).negative_outlier_factor_ + assert_array_equal(clf._fit_X, X) + + # Assert largest outlier score is smaller than smallest inlier score: + assert np.min(score[:-2]) > np.max(score[-2:]) + + # Assert predict() works: + clf = neighbors.LocalOutlierFactor(contamination=0.25, n_neighbors=5).fit(X) + expected_predictions = 6 * [1] + 2 * [-1] + assert_array_equal(clf._predict(), expected_predictions) + assert_array_equal(clf.fit_predict(X), expected_predictions) + + +def test_lof_performance(global_dtype): + # Generate train/test data + rng = check_random_state(2) + X = 0.3 * rng.randn(120, 2).astype(global_dtype, copy=False) + X_train = X[:100] + + # 
Generate some abnormal novel observations + X_outliers = rng.uniform(low=-4, high=4, size=(20, 2)).astype( + global_dtype, copy=False + ) + X_test = np.r_[X[100:], X_outliers] + y_test = np.array([0] * 20 + [1] * 20) + + # fit the model for novelty detection + clf = neighbors.LocalOutlierFactor(novelty=True).fit(X_train) + + # predict scores (the lower, the more normal) + y_pred = -clf.decision_function(X_test) + + # check that roc_auc is good + assert roc_auc_score(y_test, y_pred) > 0.99 + + +def test_lof_values(global_dtype): + # toy samples: + X_train = np.asarray([[1, 1], [1, 2], [2, 1]], dtype=global_dtype) + clf1 = neighbors.LocalOutlierFactor( + n_neighbors=2, contamination=0.1, novelty=True + ).fit(X_train) + clf2 = neighbors.LocalOutlierFactor(n_neighbors=2, novelty=True).fit(X_train) + s_0 = 2.0 * sqrt(2.0) / (1.0 + sqrt(2.0)) + s_1 = (1.0 + sqrt(2)) * (1.0 / (4.0 * sqrt(2.0)) + 1.0 / (2.0 + 2.0 * sqrt(2))) + # check predict() + assert_allclose(-clf1.negative_outlier_factor_, [s_0, s_1, s_1]) + assert_allclose(-clf2.negative_outlier_factor_, [s_0, s_1, s_1]) + # check predict(one sample not in train) + assert_allclose(-clf1.score_samples([[2.0, 2.0]]), [s_0]) + assert_allclose(-clf2.score_samples([[2.0, 2.0]]), [s_0]) + # check predict(one sample already in train) + assert_allclose(-clf1.score_samples([[1.0, 1.0]]), [s_1]) + assert_allclose(-clf2.score_samples([[1.0, 1.0]]), [s_1]) + + +def test_lof_precomputed(global_dtype, random_state=42): + """Tests LOF with a distance matrix.""" + # Note: smaller samples may result in spurious test success + rng = np.random.RandomState(random_state) + X = rng.random_sample((10, 4)).astype(global_dtype, copy=False) + Y = rng.random_sample((3, 4)).astype(global_dtype, copy=False) + DXX = metrics.pairwise_distances(X, metric="euclidean") + DYX = metrics.pairwise_distances(Y, X, metric="euclidean") + # As a feature matrix (n_samples by n_features) + lof_X = neighbors.LocalOutlierFactor(n_neighbors=3, novelty=True) + lof_X.fit(X) + pred_X_X = lof_X._predict() + pred_X_Y = lof_X.predict(Y) + + # As a dense distance matrix (n_samples by n_samples) + lof_D = neighbors.LocalOutlierFactor( + n_neighbors=3, algorithm="brute", metric="precomputed", novelty=True + ) + lof_D.fit(DXX) + pred_D_X = lof_D._predict() + pred_D_Y = lof_D.predict(DYX) + + assert_allclose(pred_X_X, pred_D_X) + assert_allclose(pred_X_Y, pred_D_Y) + + +def test_n_neighbors_attribute(): + X = iris.data + clf = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X) + assert clf.n_neighbors_ == X.shape[0] - 1 + + clf = neighbors.LocalOutlierFactor(n_neighbors=500) + msg = "n_neighbors will be set to (n_samples - 1)" + with pytest.warns(UserWarning, match=re.escape(msg)): + clf.fit(X) + assert clf.n_neighbors_ == X.shape[0] - 1 + + +def test_score_samples(global_dtype): + X_train = np.asarray([[1, 1], [1, 2], [2, 1]], dtype=global_dtype) + X_test = np.asarray([[2.0, 2.0]], dtype=global_dtype) + clf1 = neighbors.LocalOutlierFactor( + n_neighbors=2, contamination=0.1, novelty=True + ).fit(X_train) + clf2 = neighbors.LocalOutlierFactor(n_neighbors=2, novelty=True).fit(X_train) + + clf1_scores = clf1.score_samples(X_test) + clf1_decisions = clf1.decision_function(X_test) + + clf2_scores = clf2.score_samples(X_test) + clf2_decisions = clf2.decision_function(X_test) + + assert_allclose( + clf1_scores, + clf1_decisions + clf1.offset_, + ) + assert_allclose( + clf2_scores, + clf2_decisions + clf2.offset_, + ) + assert_allclose(clf1_scores, clf2_scores) + + +def test_novelty_errors(): + X = 
iris.data + + # check errors for novelty=False + clf = neighbors.LocalOutlierFactor() + clf.fit(X) + # predict, decision_function and score_samples raise ValueError + for method in ["predict", "decision_function", "score_samples"]: + outer_msg = f"'LocalOutlierFactor' has no attribute '{method}'" + inner_msg = "{} is not available when novelty=False".format(method) + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + getattr(clf, method) + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + # check errors for novelty=True + clf = neighbors.LocalOutlierFactor(novelty=True) + + outer_msg = "'LocalOutlierFactor' has no attribute 'fit_predict'" + inner_msg = "fit_predict is not available when novelty=True" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + getattr(clf, "fit_predict") + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + +def test_novelty_training_scores(global_dtype): + # check that the scores of the training samples are still accessible + # when novelty=True through the negative_outlier_factor_ attribute + X = iris.data.astype(global_dtype) + + # fit with novelty=False + clf_1 = neighbors.LocalOutlierFactor() + clf_1.fit(X) + scores_1 = clf_1.negative_outlier_factor_ + + # fit with novelty=True + clf_2 = neighbors.LocalOutlierFactor(novelty=True) + clf_2.fit(X) + scores_2 = clf_2.negative_outlier_factor_ + + assert_allclose(scores_1, scores_2) + + +def test_hasattr_prediction(): + # check availability of prediction methods depending on novelty value. + X = [[1, 1], [1, 2], [2, 1]] + + # when novelty=True + clf = neighbors.LocalOutlierFactor(novelty=True) + clf.fit(X) + assert hasattr(clf, "predict") + assert hasattr(clf, "decision_function") + assert hasattr(clf, "score_samples") + assert not hasattr(clf, "fit_predict") + + # when novelty=False + clf = neighbors.LocalOutlierFactor(novelty=False) + clf.fit(X) + assert hasattr(clf, "fit_predict") + assert not hasattr(clf, "predict") + assert not hasattr(clf, "decision_function") + assert not hasattr(clf, "score_samples") + + +@parametrize_with_checks([neighbors.LocalOutlierFactor(novelty=True)]) +def test_novelty_true_common_tests(estimator, check): + # the common tests are run for the default LOF (novelty=False). + # here we run these common tests for LOF when novelty=True + check(estimator) + + +@pytest.mark.parametrize("expected_outliers", [30, 53]) +def test_predicted_outlier_number(expected_outliers): + # the number of predicted outliers should be equal to the number of + # expected outliers unless there are ties in the abnormality scores. 
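+    # When the counts differ, check_outlier_corruption verifies that the
+    # mismatch is explained by tied negative_outlier_factor_ values at the
+    # contamination cutoff.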
+ X = iris.data + n_samples = X.shape[0] + contamination = float(expected_outliers) / n_samples + + clf = neighbors.LocalOutlierFactor(contamination=contamination) + y_pred = clf.fit_predict(X) + + num_outliers = np.sum(y_pred != 1) + if num_outliers != expected_outliers: + y_dec = clf.negative_outlier_factor_ + check_outlier_corruption(num_outliers, expected_outliers, y_dec) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse(csr_container): + # LocalOutlierFactor must support CSR inputs + # TODO: compare results on dense and sparse data as proposed in: + # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186 + X = csr_container(iris.data) + + lof = neighbors.LocalOutlierFactor(novelty=True) + lof.fit(X) + lof.predict(X) + lof.score_samples(X) + lof.decision_function(X) + + lof = neighbors.LocalOutlierFactor(novelty=False) + lof.fit_predict(X) + + +def test_lof_error_n_neighbors_too_large(): + """Check that we raise a proper error message when n_neighbors == n_samples. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/17207 + """ + X = np.ones((7, 7)) + + msg = ( + "Expected n_neighbors < n_samples_fit, but n_neighbors = 1, " + "n_samples_fit = 1, n_samples = 1" + ) + with pytest.raises(ValueError, match=msg): + lof = neighbors.LocalOutlierFactor(n_neighbors=1).fit(X[:1]) + + lof = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X[:2]) + assert lof.n_samples_fit_ == 2 + + msg = ( + "Expected n_neighbors < n_samples_fit, but n_neighbors = 2, " + "n_samples_fit = 2, n_samples = 2" + ) + with pytest.raises(ValueError, match=msg): + lof.kneighbors(None, n_neighbors=2) + + distances, indices = lof.kneighbors(None, n_neighbors=1) + assert distances.shape == (2, 1) + assert indices.shape == (2, 1) + + msg = ( + "Expected n_neighbors <= n_samples_fit, but n_neighbors = 3, " + "n_samples_fit = 2, n_samples = 7" + ) + with pytest.raises(ValueError, match=msg): + lof.kneighbors(X, n_neighbors=3) + + ( + distances, + indices, + ) = lof.kneighbors(X, n_neighbors=2) + assert distances.shape == (7, 2) + assert indices.shape == (7, 2) + + +@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree", "brute"]) +@pytest.mark.parametrize("novelty", [True, False]) +@pytest.mark.parametrize("contamination", [0.5, "auto"]) +def test_lof_input_dtype_preservation(global_dtype, algorithm, contamination, novelty): + """Check that the fitted attributes are stored using the data type of X.""" + X = iris.data.astype(global_dtype, copy=False) + + iso = neighbors.LocalOutlierFactor( + n_neighbors=5, algorithm=algorithm, contamination=contamination, novelty=novelty + ) + iso.fit(X) + + assert iso.negative_outlier_factor_.dtype == global_dtype + + for method in ("score_samples", "decision_function"): + if hasattr(iso, method): + y_pred = getattr(iso, method)(X) + assert y_pred.dtype == global_dtype + + +@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree", "brute"]) +@pytest.mark.parametrize("novelty", [True, False]) +@pytest.mark.parametrize("contamination", [0.5, "auto"]) +def test_lof_dtype_equivalence(algorithm, novelty, contamination): + """Check the equivalence of the results with 32 and 64 bits input.""" + + inliers = iris.data[:50] # setosa iris are really distinct from others + outliers = iris.data[-5:] # virginica will be considered as outliers + # lower the precision of the input data to check that we have an equivalence when + # making the computation in 32 and 64 bits. 
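+    # Casting to float32 first guarantees that both fits see numerically
+    # identical inputs; only the precision of the internal computations differs.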
+ X = np.concatenate([inliers, outliers], axis=0).astype(np.float32) + + lof_32 = neighbors.LocalOutlierFactor( + algorithm=algorithm, novelty=novelty, contamination=contamination + ) + X_32 = X.astype(np.float32, copy=True) + lof_32.fit(X_32) + + lof_64 = neighbors.LocalOutlierFactor( + algorithm=algorithm, novelty=novelty, contamination=contamination + ) + X_64 = X.astype(np.float64, copy=True) + lof_64.fit(X_64) + + assert_allclose(lof_32.negative_outlier_factor_, lof_64.negative_outlier_factor_) + + for method in ("score_samples", "decision_function", "predict", "fit_predict"): + if hasattr(lof_32, method): + y_pred_32 = getattr(lof_32, method)(X_32) + y_pred_64 = getattr(lof_64, method)(X_64) + assert_allclose(y_pred_32, y_pred_64, atol=0.0002) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nca.py new file mode 100644 index 0000000000000000000000000000000000000000..7dedd97ff423b802546e8ee457bc403863ec4d9d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nca.py @@ -0,0 +1,548 @@ +""" +Testing for Neighborhood Component Analysis module (sklearn.neighbors.nca) +""" + +# Authors: William de Vazelhes +# John Chiotellis +# License: BSD 3 clause + +import re + +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from scipy.optimize import check_grad + +from sklearn import clone +from sklearn.datasets import load_iris, make_blobs, make_classification +from sklearn.exceptions import ConvergenceWarning +from sklearn.metrics import pairwise_distances +from sklearn.neighbors import NeighborhoodComponentsAnalysis +from sklearn.preprocessing import LabelEncoder +from sklearn.utils import check_random_state + +rng = check_random_state(0) +# load and shuffle iris dataset +iris = load_iris() +perm = rng.permutation(iris.target.size) +iris_data = iris.data[perm] +iris_target = iris.target[perm] +EPS = np.finfo(float).eps + + +def test_simple_example(): + """Test on a simple example. + + Puts four points in the input space where the opposite labels points are + next to each other. After transform the samples from the same class + should be next to each other. + + """ + X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) + y = np.array([1, 0, 1, 0]) + nca = NeighborhoodComponentsAnalysis( + n_components=2, init="identity", random_state=42 + ) + nca.fit(X, y) + X_t = nca.transform(X) + assert_array_equal(pairwise_distances(X_t).argsort()[:, 1], np.array([2, 3, 0, 1])) + + +def test_toy_example_collapse_points(): + """Test on a toy example of three points that should collapse + + We build a simple example: two points from the same class and a point from + a different class in the middle of them. On this simple example, the new + (transformed) points should all collapse into one single point. Indeed, the + objective is 2/(1 + exp(d/2)), with d the euclidean distance between the + two samples from the same class. This is maximized for d=0 (because d>=0), + with an objective equal to 1 (loss=-1.). 
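+    A callback stores the loss at the last iteration so the test can also check
+    that it reaches this optimal value of -1 (up to numerical tolerance).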
+ + """ + rng = np.random.RandomState(42) + input_dim = 5 + two_points = rng.randn(2, input_dim) + X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]]) + y = [0, 0, 1] + + class LossStorer: + def __init__(self, X, y): + self.loss = np.inf # initialize the loss to very high + # Initialize a fake NCA and variables needed to compute the loss: + self.fake_nca = NeighborhoodComponentsAnalysis() + self.fake_nca.n_iter_ = np.inf + self.X, y = self.fake_nca._validate_data(X, y, ensure_min_samples=2) + y = LabelEncoder().fit_transform(y) + self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] + + def callback(self, transformation, n_iter): + """Stores the last value of the loss function""" + self.loss, _ = self.fake_nca._loss_grad_lbfgs( + transformation, self.X, self.same_class_mask, -1.0 + ) + + loss_storer = LossStorer(X, y) + nca = NeighborhoodComponentsAnalysis(random_state=42, callback=loss_storer.callback) + X_t = nca.fit_transform(X, y) + print(X_t) + # test that points are collapsed into one point + assert_array_almost_equal(X_t - X_t[0], 0.0) + assert abs(loss_storer.loss + 1) < 1e-10 + + +def test_finite_differences(global_random_seed): + """Test gradient of loss function + + Assert that the gradient is almost equal to its finite differences + approximation. + """ + # Initialize the transformation `M`, as well as `X` and `y` and `NCA` + rng = np.random.RandomState(global_random_seed) + X, y = make_classification(random_state=global_random_seed) + M = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1]) + nca = NeighborhoodComponentsAnalysis() + nca.n_iter_ = 0 + mask = y[:, np.newaxis] == y[np.newaxis, :] + + def fun(M): + return nca._loss_grad_lbfgs(M, X, mask)[0] + + def grad(M): + return nca._loss_grad_lbfgs(M, X, mask)[1] + + # compare the gradient to a finite difference approximation + diff = check_grad(fun, grad, M.ravel()) + assert diff == pytest.approx(0.0, abs=1e-4) + + +def test_params_validation(): + # Test that invalid parameters raise value error + X = np.arange(12).reshape(4, 3) + y = [1, 1, 2, 2] + NCA = NeighborhoodComponentsAnalysis + rng = np.random.RandomState(42) + + init = rng.rand(5, 3) + msg = ( + f"The output dimensionality ({init.shape[0]}) " + "of the given linear transformation `init` cannot be " + f"greater than its input dimensionality ({init.shape[1]})." + ) + with pytest.raises(ValueError, match=re.escape(msg)): + NCA(init=init).fit(X, y) + n_components = 10 + msg = ( + "The preferred dimensionality of the projected space " + f"`n_components` ({n_components}) cannot be greater " + f"than the given data dimensionality ({X.shape[1]})!" 
+ ) + with pytest.raises(ValueError, match=re.escape(msg)): + NCA(n_components=n_components).fit(X, y) + + +def test_transformation_dimensions(): + X = np.arange(12).reshape(4, 3) + y = [1, 1, 2, 2] + + # Fail if transformation input dimension does not match inputs dimensions + transformation = np.array([[1, 2], [3, 4]]) + with pytest.raises(ValueError): + NeighborhoodComponentsAnalysis(init=transformation).fit(X, y) + + # Fail if transformation output dimension is larger than + # transformation input dimension + transformation = np.array([[1, 2], [3, 4], [5, 6]]) + # len(transformation) > len(transformation[0]) + with pytest.raises(ValueError): + NeighborhoodComponentsAnalysis(init=transformation).fit(X, y) + + # Pass otherwise + transformation = np.arange(9).reshape(3, 3) + NeighborhoodComponentsAnalysis(init=transformation).fit(X, y) + + +def test_n_components(): + rng = np.random.RandomState(42) + X = np.arange(12).reshape(4, 3) + y = [1, 1, 2, 2] + + init = rng.rand(X.shape[1] - 1, 3) + + # n_components = X.shape[1] != transformation.shape[0] + n_components = X.shape[1] + nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components) + msg = ( + "The preferred dimensionality of the projected space " + f"`n_components` ({n_components}) does not match the output " + "dimensionality of the given linear transformation " + f"`init` ({init.shape[0]})!" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + nca.fit(X, y) + + # n_components > X.shape[1] + n_components = X.shape[1] + 2 + nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components) + msg = ( + "The preferred dimensionality of the projected space " + f"`n_components` ({n_components}) cannot be greater than " + f"the given data dimensionality ({X.shape[1]})!" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + nca.fit(X, y) + + # n_components < X.shape[1] + nca = NeighborhoodComponentsAnalysis(n_components=2, init="identity") + nca.fit(X, y) + + +def test_init_transformation(): + rng = np.random.RandomState(42) + X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0) + + # Start learning from scratch + nca = NeighborhoodComponentsAnalysis(init="identity") + nca.fit(X, y) + + # Initialize with random + nca_random = NeighborhoodComponentsAnalysis(init="random") + nca_random.fit(X, y) + + # Initialize with auto + nca_auto = NeighborhoodComponentsAnalysis(init="auto") + nca_auto.fit(X, y) + + # Initialize with PCA + nca_pca = NeighborhoodComponentsAnalysis(init="pca") + nca_pca.fit(X, y) + + # Initialize with LDA + nca_lda = NeighborhoodComponentsAnalysis(init="lda") + nca_lda.fit(X, y) + + init = rng.rand(X.shape[1], X.shape[1]) + nca = NeighborhoodComponentsAnalysis(init=init) + nca.fit(X, y) + + # init.shape[1] must match X.shape[1] + init = rng.rand(X.shape[1], X.shape[1] + 1) + nca = NeighborhoodComponentsAnalysis(init=init) + msg = ( + f"The input dimensionality ({init.shape[1]}) of the given " + "linear transformation `init` must match the " + f"dimensionality of the given inputs `X` ({X.shape[1]})." + ) + with pytest.raises(ValueError, match=re.escape(msg)): + nca.fit(X, y) + + # init.shape[0] must be <= init.shape[1] + init = rng.rand(X.shape[1] + 1, X.shape[1]) + nca = NeighborhoodComponentsAnalysis(init=init) + msg = ( + f"The output dimensionality ({init.shape[0]}) of the given " + "linear transformation `init` cannot be " + f"greater than its input dimensionality ({init.shape[1]})." 
+ ) + with pytest.raises(ValueError, match=re.escape(msg)): + nca.fit(X, y) + + # init.shape[0] must match n_components + init = rng.rand(X.shape[1], X.shape[1]) + n_components = X.shape[1] - 2 + nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components) + msg = ( + "The preferred dimensionality of the " + f"projected space `n_components` ({n_components}) " + "does not match the output dimensionality of the given " + f"linear transformation `init` ({init.shape[0]})!" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + nca.fit(X, y) + + +@pytest.mark.parametrize("n_samples", [3, 5, 7, 11]) +@pytest.mark.parametrize("n_features", [3, 5, 7, 11]) +@pytest.mark.parametrize("n_classes", [5, 7, 11]) +@pytest.mark.parametrize("n_components", [3, 5, 7, 11]) +def test_auto_init(n_samples, n_features, n_classes, n_components): + # Test that auto choose the init as expected with every configuration + # of order of n_samples, n_features, n_classes and n_components. + rng = np.random.RandomState(42) + nca_base = NeighborhoodComponentsAnalysis( + init="auto", n_components=n_components, max_iter=1, random_state=rng + ) + if n_classes >= n_samples: + pass + # n_classes > n_samples is impossible, and n_classes == n_samples + # throws an error from lda but is an absurd case + else: + X = rng.randn(n_samples, n_features) + y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples] + if n_components > n_features: + # this would return a ValueError, which is already tested in + # test_params_validation + pass + else: + nca = clone(nca_base) + nca.fit(X, y) + if n_components <= min(n_classes - 1, n_features): + nca_other = clone(nca_base).set_params(init="lda") + elif n_components < min(n_features, n_samples): + nca_other = clone(nca_base).set_params(init="pca") + else: + nca_other = clone(nca_base).set_params(init="identity") + nca_other.fit(X, y) + assert_array_almost_equal(nca.components_, nca_other.components_) + + +def test_warm_start_validation(): + X, y = make_classification( + n_samples=30, + n_features=5, + n_classes=4, + n_redundant=0, + n_informative=5, + random_state=0, + ) + + nca = NeighborhoodComponentsAnalysis(warm_start=True, max_iter=5) + nca.fit(X, y) + + X_less_features, y = make_classification( + n_samples=30, + n_features=4, + n_classes=4, + n_redundant=0, + n_informative=4, + random_state=0, + ) + msg = ( + f"The new inputs dimensionality ({X_less_features.shape[1]}) " + "does not match the input dimensionality of the previously learned " + f"transformation ({nca.components_.shape[1]})." + ) + with pytest.raises(ValueError, match=re.escape(msg)): + nca.fit(X_less_features, y) + + +def test_warm_start_effectiveness(): + # A 1-iteration second fit on same data should give almost same result + # with warm starting, and quite different result without warm starting. 
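+    # Both estimators are first fit to convergence, then refit with max_iter=1;
+    # the warm-started components_ should change little, while the cold restart
+    # should change noticeably more.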
+ + nca_warm = NeighborhoodComponentsAnalysis(warm_start=True, random_state=0) + nca_warm.fit(iris_data, iris_target) + transformation_warm = nca_warm.components_ + nca_warm.max_iter = 1 + nca_warm.fit(iris_data, iris_target) + transformation_warm_plus_one = nca_warm.components_ + + nca_cold = NeighborhoodComponentsAnalysis(warm_start=False, random_state=0) + nca_cold.fit(iris_data, iris_target) + transformation_cold = nca_cold.components_ + nca_cold.max_iter = 1 + nca_cold.fit(iris_data, iris_target) + transformation_cold_plus_one = nca_cold.components_ + + diff_warm = np.sum(np.abs(transformation_warm_plus_one - transformation_warm)) + diff_cold = np.sum(np.abs(transformation_cold_plus_one - transformation_cold)) + assert diff_warm < 3.0, ( + "Transformer changed significantly after one " + "iteration even though it was warm-started." + ) + + assert diff_cold > diff_warm, ( + "Cold-started transformer changed less " + "significantly than warm-started " + "transformer after one iteration." + ) + + +@pytest.mark.parametrize( + "init_name", ["pca", "lda", "identity", "random", "precomputed"] +) +def test_verbose(init_name, capsys): + # assert there is proper output when verbose = 1, for every initialization + # except auto because auto will call one of the others + rng = np.random.RandomState(42) + X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0) + regexp_init = r"... done in \ *\d+\.\d{2}s" + msgs = { + "pca": "Finding principal components" + regexp_init, + "lda": "Finding most discriminative components" + regexp_init, + } + if init_name == "precomputed": + init = rng.randn(X.shape[1], X.shape[1]) + else: + init = init_name + nca = NeighborhoodComponentsAnalysis(verbose=1, init=init) + nca.fit(X, y) + out, _ = capsys.readouterr() + + # check output + lines = re.split("\n+", out) + # if pca or lda init, an additional line is printed, so we test + # it and remove it to test the rest equally among initializations + if init_name in ["pca", "lda"]: + assert re.match(msgs[init_name], lines[0]) + lines = lines[1:] + assert lines[0] == "[NeighborhoodComponentsAnalysis]" + header = "{:>10} {:>20} {:>10}".format("Iteration", "Objective Value", "Time(s)") + assert lines[1] == "[NeighborhoodComponentsAnalysis] {}".format(header) + assert lines[2] == "[NeighborhoodComponentsAnalysis] {}".format("-" * len(header)) + for line in lines[3:-2]: + # The following regex will match for instance: + # '[NeighborhoodComponentsAnalysis] 0 6.988936e+01 0.01' + assert re.match( + r"\[NeighborhoodComponentsAnalysis\] *\d+ *\d\.\d{6}e" + r"[+|-]\d+\ *\d+\.\d{2}", + line, + ) + assert re.match( + r"\[NeighborhoodComponentsAnalysis\] Training took\ *" r"\d+\.\d{2}s\.", + lines[-2], + ) + assert lines[-1] == "" + + +def test_no_verbose(capsys): + # assert by default there is no output (verbose=0) + nca = NeighborhoodComponentsAnalysis() + nca.fit(iris_data, iris_target) + out, _ = capsys.readouterr() + # check output + assert out == "" + + +def test_singleton_class(): + X = iris_data + y = iris_target + + # one singleton class + singleton_class = 1 + (ind_singleton,) = np.where(y == singleton_class) + y[ind_singleton] = 2 + y[ind_singleton[0]] = singleton_class + + nca = NeighborhoodComponentsAnalysis(max_iter=30) + nca.fit(X, y) + + # One non-singleton class + (ind_1,) = np.where(y == 1) + (ind_2,) = np.where(y == 2) + y[ind_1] = 0 + y[ind_1[0]] = 1 + y[ind_2] = 0 + y[ind_2[0]] = 2 + + nca = NeighborhoodComponentsAnalysis(max_iter=30) + nca.fit(X, y) + + # Only singleton classes + (ind_0,) = 
np.where(y == 0) + (ind_1,) = np.where(y == 1) + (ind_2,) = np.where(y == 2) + X = X[[ind_0[0], ind_1[0], ind_2[0]]] + y = y[[ind_0[0], ind_1[0], ind_2[0]]] + + nca = NeighborhoodComponentsAnalysis(init="identity", max_iter=30) + nca.fit(X, y) + assert_array_equal(X, nca.transform(X)) + + +def test_one_class(): + X = iris_data[iris_target == 0] + y = iris_target[iris_target == 0] + + nca = NeighborhoodComponentsAnalysis( + max_iter=30, n_components=X.shape[1], init="identity" + ) + nca.fit(X, y) + assert_array_equal(X, nca.transform(X)) + + +def test_callback(capsys): + max_iter = 10 + + def my_cb(transformation, n_iter): + assert transformation.shape == (iris_data.shape[1] ** 2,) + rem_iter = max_iter - n_iter + print("{} iterations remaining...".format(rem_iter)) + + # assert that my_cb is called + nca = NeighborhoodComponentsAnalysis(max_iter=max_iter, callback=my_cb, verbose=1) + nca.fit(iris_data, iris_target) + out, _ = capsys.readouterr() + + # check output + assert "{} iterations remaining...".format(max_iter - 1) in out + + +def test_expected_transformation_shape(): + """Test that the transformation has the expected shape.""" + X = iris_data + y = iris_target + + class TransformationStorer: + def __init__(self, X, y): + # Initialize a fake NCA and variables needed to call the loss + # function: + self.fake_nca = NeighborhoodComponentsAnalysis() + self.fake_nca.n_iter_ = np.inf + self.X, y = self.fake_nca._validate_data(X, y, ensure_min_samples=2) + y = LabelEncoder().fit_transform(y) + self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] + + def callback(self, transformation, n_iter): + """Stores the last value of the transformation taken as input by + the optimizer""" + self.transformation = transformation + + transformation_storer = TransformationStorer(X, y) + cb = transformation_storer.callback + nca = NeighborhoodComponentsAnalysis(max_iter=5, callback=cb) + nca.fit(X, y) + assert transformation_storer.transformation.size == X.shape[1] ** 2 + + +def test_convergence_warning(): + nca = NeighborhoodComponentsAnalysis(max_iter=2, verbose=1) + cls_name = nca.__class__.__name__ + msg = "[{}] NCA did not converge".format(cls_name) + with pytest.warns(ConvergenceWarning, match=re.escape(msg)): + nca.fit(iris_data, iris_target) + + +@pytest.mark.parametrize( + "param, value", + [ + ("n_components", np.int32(3)), + ("max_iter", np.int32(100)), + ("tol", np.float32(0.0001)), + ], +) +def test_parameters_valid_types(param, value): + # check that no error is raised when parameters have numpy integer or + # floating types. 
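+    # e.g. n_components=np.int32(3) should be accepted just like the
+    # built-in int 3 (see the parametrization above).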
+ nca = NeighborhoodComponentsAnalysis(**{param: value}) + + X = iris_data + y = iris_target + + nca.fit(X, y) + + +def test_nca_feature_names_out(): + """Check `get_feature_names_out` for `NeighborhoodComponentsAnalysis`.""" + + X = iris_data + y = iris_target + + est = NeighborhoodComponentsAnalysis().fit(X, y) + names_out = est.get_feature_names_out() + + class_name_lower = est.__class__.__name__.lower() + expected_names_out = np.array( + [f"{class_name_lower}{i}" for i in range(est.components_.shape[1])], + dtype=object, + ) + assert_array_equal(names_out, expected_names_out) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nearest_centroid.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nearest_centroid.py new file mode 100644 index 0000000000000000000000000000000000000000..ee548d801781000cbbebb7da36e56e7b5e2724f3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_nearest_centroid.py @@ -0,0 +1,178 @@ +""" +Testing for the nearest centroid module. +""" +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn import datasets +from sklearn.neighbors import NearestCentroid +from sklearn.utils.fixes import CSR_CONTAINERS + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [-1, 1, 1] + +# also load the iris dataset +# and randomly permute it +iris = datasets.load_iris() +rng = np.random.RandomState(1) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_classification_toy(csr_container): + # Check classification on a toy dataset, including sparse versions. + X_csr = csr_container(X) + T_csr = csr_container(T) + + clf = NearestCentroid() + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + + # Same test, but with a sparse matrix to fit and test. + clf = NearestCentroid() + clf.fit(X_csr, y) + assert_array_equal(clf.predict(T_csr), true_result) + + # Fit with sparse, test with non-sparse + clf = NearestCentroid() + clf.fit(X_csr, y) + assert_array_equal(clf.predict(T), true_result) + + # Fit with non-sparse, test with sparse + clf = NearestCentroid() + clf.fit(X, y) + assert_array_equal(clf.predict(T_csr), true_result) + + # Fit and predict with non-CSR sparse matrices + clf = NearestCentroid() + clf.fit(X_csr.tocoo(), y) + assert_array_equal(clf.predict(T_csr.tolil()), true_result) + + +# TODO(1.5): Remove filterwarnings when support for some metrics is removed +@pytest.mark.filterwarnings("ignore:Support for distance metrics:FutureWarning:sklearn") +def test_iris(): + # Check consistency on dataset iris. + for metric in ("euclidean", "cosine"): + clf = NearestCentroid(metric=metric).fit(iris.data, iris.target) + score = np.mean(clf.predict(iris.data) == iris.target) + assert score > 0.9, "Failed with score = " + str(score) + + +# TODO(1.5): Remove filterwarnings when support for some metrics is removed +@pytest.mark.filterwarnings("ignore:Support for distance metrics:FutureWarning:sklearn") +def test_iris_shrinkage(): + # Check consistency on dataset iris, when using shrinkage. 
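+    # Every (metric, shrink_threshold) combination below is expected to keep
+    # the training accuracy on iris above 0.8.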
+ for metric in ("euclidean", "cosine"): + for shrink_threshold in [None, 0.1, 0.5]: + clf = NearestCentroid(metric=metric, shrink_threshold=shrink_threshold) + clf = clf.fit(iris.data, iris.target) + score = np.mean(clf.predict(iris.data) == iris.target) + assert score > 0.8, "Failed with score = " + str(score) + + +def test_pickle(): + import pickle + + # classification + obj = NearestCentroid() + obj.fit(iris.data, iris.target) + score = obj.score(iris.data, iris.target) + s = pickle.dumps(obj) + + obj2 = pickle.loads(s) + assert type(obj2) == obj.__class__ + score2 = obj2.score(iris.data, iris.target) + assert_array_equal( + score, + score2, + "Failed to generate same score after pickling (classification).", + ) + + +def test_shrinkage_correct(): + # Ensure that the shrinking is correct. + # The expected result is calculated by R (pamr), + # which is implemented by the author of the original paper. + # (One need to modify the code to output the new centroid in pamr.predict) + + X = np.array([[0, 1], [1, 0], [1, 1], [2, 0], [6, 8]]) + y = np.array([1, 1, 2, 2, 2]) + clf = NearestCentroid(shrink_threshold=0.1) + clf.fit(X, y) + expected_result = np.array([[0.7787310, 0.8545292], [2.814179, 2.763647]]) + np.testing.assert_array_almost_equal(clf.centroids_, expected_result) + + +def test_shrinkage_threshold_decoded_y(): + clf = NearestCentroid(shrink_threshold=0.01) + y_ind = np.asarray(y) + y_ind[y_ind == -1] = 0 + clf.fit(X, y_ind) + centroid_encoded = clf.centroids_ + clf.fit(X, y) + assert_array_equal(centroid_encoded, clf.centroids_) + + +def test_predict_translated_data(): + # Test that NearestCentroid gives same results on translated data + + rng = np.random.RandomState(0) + X = rng.rand(50, 50) + y = rng.randint(0, 3, 50) + noise = rng.rand(50) + clf = NearestCentroid(shrink_threshold=0.1) + clf.fit(X, y) + y_init = clf.predict(X) + clf = NearestCentroid(shrink_threshold=0.1) + X_noise = X + noise + clf.fit(X_noise, y) + y_translate = clf.predict(X_noise) + assert_array_equal(y_init, y_translate) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_manhattan_metric(csr_container): + # Test the manhattan metric. 
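+    # The expected centroids [[-1, -1], [1, 1]] are the per-class feature-wise
+    # medians of the toy data, and the dense and sparse fits must agree.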
+ X_csr = csr_container(X) + + clf = NearestCentroid(metric="manhattan") + clf.fit(X, y) + dense_centroid = clf.centroids_ + clf.fit(X_csr, y) + assert_array_equal(clf.centroids_, dense_centroid) + assert_array_equal(dense_centroid, [[-1, -1], [1, 1]]) + + +# TODO(1.5): remove this test +@pytest.mark.parametrize( + "metric", sorted(list(NearestCentroid._valid_metrics - {"manhattan", "euclidean"})) +) +def test_deprecated_distance_metric_supports(metric): + # Check that a warning is raised for all deprecated distance metric supports + clf = NearestCentroid(metric=metric) + with pytest.warns( + FutureWarning, + match="Support for distance metrics other than euclidean and manhattan", + ): + clf.fit(X, y) + + +def test_features_zero_var(): + # Test that features with 0 variance throw error + + X = np.empty((10, 2)) + X[:, 0] = -0.13725701 + X[:, 1] = -0.9853293 + y = np.zeros((10)) + y[0] = 1 + + clf = NearestCentroid(shrink_threshold=0.1) + with pytest.raises(ValueError): + clf.fit(X, y) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fc71478e6f551723c716f63d8219cf0bd421da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors.py @@ -0,0 +1,2372 @@ +import re +import warnings +from itertools import product + +import joblib +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn import ( + config_context, + datasets, + metrics, + neighbors, +) +from sklearn.base import clone +from sklearn.exceptions import DataConversionWarning, EfficiencyWarning, NotFittedError +from sklearn.metrics._dist_metrics import ( + DistanceMetric, +) +from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS, pairwise_distances +from sklearn.metrics.tests.test_dist_metrics import BOOL_METRICS +from sklearn.metrics.tests.test_pairwise_distances_reduction import ( + assert_compatible_argkmin_results, + assert_compatible_radius_results, +) +from sklearn.model_selection import cross_val_score, train_test_split +from sklearn.neighbors import ( + VALID_METRICS_SPARSE, + KNeighborsRegressor, +) +from sklearn.neighbors._base import ( + KNeighborsMixin, + _check_precomputed, + _is_sorted_by_data, + sort_graph_by_row_values, +) +from sklearn.pipeline import make_pipeline +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import ( + BSR_CONTAINERS, + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DIA_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, + parse_version, + sp_version, +) +from sklearn.utils.validation import check_random_state + +rng = np.random.RandomState(0) +# load and shuffle iris dataset +iris = datasets.load_iris() +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + +# load and shuffle digits +digits = datasets.load_digits() +perm = rng.permutation(digits.target.size) +digits.data = digits.data[perm] +digits.target = digits.target[perm] + +SPARSE_TYPES = tuple( + BSR_CONTAINERS + + COO_CONTAINERS + + CSC_CONTAINERS + + CSR_CONTAINERS + + DOK_CONTAINERS + + LIL_CONTAINERS +) +SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,) + +ALGORITHMS = ("ball_tree", "brute", "kd_tree", "auto") +COMMON_VALID_METRICS = sorted( + set.intersection(*map(set, neighbors.VALID_METRICS.values())) +) # type: ignore + +P = 
(1, 2, 3, 4, np.inf) + +# Filter deprecation warnings. +neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph) +neighbors.radius_neighbors_graph = ignore_warnings(neighbors.radius_neighbors_graph) + +# A list containing metrics where the string specifies the use of the +# DistanceMetric object directly (as resolved in _parse_metric) +DISTANCE_METRIC_OBJS = ["DM_euclidean"] + + +def _parse_metric(metric: str, dtype=None): + """ + Helper function for properly building a type-specialized DistanceMetric instances. + + Constructs a type-specialized DistanceMetric instance from a string + beginning with "DM_" while allowing a pass-through for other metric-specifying + strings. This is necessary since we wish to parameterize dtype independent of + metric, yet DistanceMetric requires it for construction. + + """ + if metric[:3] == "DM_": + return DistanceMetric.get_metric(metric[3:], dtype=dtype) + return metric + + +def _generate_test_params_for(metric: str, n_features: int): + """Return list of DistanceMetric kwargs for tests.""" + + # Distinguishing on cases not to compute unneeded datastructures. + rng = np.random.RandomState(1) + + if metric == "minkowski": + minkowski_kwargs = [dict(p=1.5), dict(p=2), dict(p=3), dict(p=np.inf)] + if sp_version >= parse_version("1.8.0.dev0"): + # TODO: remove the test once we no longer support scipy < 1.8.0. + # Recent scipy versions accept weights in the Minkowski metric directly: + # type: ignore + minkowski_kwargs.append(dict(p=3, w=rng.rand(n_features))) + return minkowski_kwargs + + if metric == "seuclidean": + return [dict(V=rng.rand(n_features))] + + if metric == "mahalanobis": + A = rng.rand(n_features, n_features) + # Make the matrix symmetric positive definite + VI = A + A.T + 3 * np.eye(n_features) + return [dict(VI=VI)] + + # Case of: "euclidean", "manhattan", "chebyshev", "haversine" or any other metric. + # In those cases, no kwargs are needed. + return [{}] + + +def _weight_func(dist): + """Weight function to replace lambda d: d ** -2. + The lambda function is not valid because: + if d==0 then 0^-2 is not valid.""" + + # Dist could be multidimensional, flatten it so all values + # can be looped + with np.errstate(divide="ignore"): + retval = 1.0 / dist + return retval**2 + + +WEIGHTS = ["uniform", "distance", _weight_func] + + +@pytest.mark.parametrize( + "n_samples, n_features, n_query_pts, n_neighbors", + [ + (100, 100, 10, 100), + (1000, 5, 100, 1), + ], +) +@pytest.mark.parametrize("query_is_train", [False, True]) +@pytest.mark.parametrize("metric", COMMON_VALID_METRICS + DISTANCE_METRIC_OBJS) # type: ignore # noqa +def test_unsupervised_kneighbors( + global_dtype, + n_samples, + n_features, + n_query_pts, + n_neighbors, + query_is_train, + metric, +): + # The different algorithms must return identical results + # on their common metrics, with and without returning + # distances + + metric = _parse_metric(metric, global_dtype) + + # Redefining the rng locally to use the same generated X + local_rng = np.random.RandomState(0) + X = local_rng.rand(n_samples, n_features).astype(global_dtype, copy=False) + + query = ( + X + if query_is_train + else local_rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) + ) + + results_nodist = [] + results = [] + + for algorithm in ALGORITHMS: + if isinstance(metric, DistanceMetric) and global_dtype == np.float32: + if "tree" in algorithm: # pragma: nocover + pytest.skip( + "Neither KDTree nor BallTree support 32-bit distance metric" + " objects." 
+ ) + neigh = neighbors.NearestNeighbors( + n_neighbors=n_neighbors, algorithm=algorithm, metric=metric + ) + neigh.fit(X) + + results_nodist.append(neigh.kneighbors(query, return_distance=False)) + results.append(neigh.kneighbors(query, return_distance=True)) + + for i in range(len(results) - 1): + algorithm = ALGORITHMS[i] + next_algorithm = ALGORITHMS[i + 1] + + indices_no_dist = results_nodist[i] + distances, next_distances = results[i][0], results[i + 1][0] + indices, next_indices = results[i][1], results[i + 1][1] + assert_array_equal( + indices_no_dist, + indices, + err_msg=( + f"The '{algorithm}' algorithm returns different" + "indices depending on 'return_distances'." + ), + ) + assert_array_equal( + indices, + next_indices, + err_msg=( + f"The '{algorithm}' and '{next_algorithm}' " + "algorithms return different indices." + ), + ) + assert_allclose( + distances, + next_distances, + err_msg=( + f"The '{algorithm}' and '{next_algorithm}' " + "algorithms return different distances." + ), + atol=1e-6, + ) + + +@pytest.mark.parametrize( + "n_samples, n_features, n_query_pts", + [ + (100, 100, 10), + (1000, 5, 100), + ], +) +@pytest.mark.parametrize("metric", COMMON_VALID_METRICS + DISTANCE_METRIC_OBJS) # type: ignore # noqa +@pytest.mark.parametrize("n_neighbors, radius", [(1, 100), (50, 500), (100, 1000)]) +@pytest.mark.parametrize( + "NeighborsMixinSubclass", + [ + neighbors.KNeighborsClassifier, + neighbors.KNeighborsRegressor, + neighbors.RadiusNeighborsClassifier, + neighbors.RadiusNeighborsRegressor, + ], +) +def test_neigh_predictions_algorithm_agnosticity( + global_dtype, + n_samples, + n_features, + n_query_pts, + metric, + n_neighbors, + radius, + NeighborsMixinSubclass, +): + # The different algorithms must return identical predictions results + # on their common metrics. + + metric = _parse_metric(metric, global_dtype) + if isinstance(metric, DistanceMetric): + if "Classifier" in NeighborsMixinSubclass.__name__: + pytest.skip( + "Metrics of type `DistanceMetric` are not yet supported for" + " classifiers." + ) + if "Radius" in NeighborsMixinSubclass.__name__: + pytest.skip( + "Metrics of type `DistanceMetric` are not yet supported for" + " radius-neighbor estimators." + ) + + # Redefining the rng locally to use the same generated X + local_rng = np.random.RandomState(0) + X = local_rng.rand(n_samples, n_features).astype(global_dtype, copy=False) + y = local_rng.randint(3, size=n_samples) + + query = local_rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) + + predict_results = [] + + parameter = ( + n_neighbors if issubclass(NeighborsMixinSubclass, KNeighborsMixin) else radius + ) + + for algorithm in ALGORITHMS: + if isinstance(metric, DistanceMetric) and global_dtype == np.float32: + if "tree" in algorithm: # pragma: nocover + pytest.skip( + "Neither KDTree nor BallTree support 32-bit distance metric" + " objects." + ) + neigh = NeighborsMixinSubclass(parameter, algorithm=algorithm, metric=metric) + neigh.fit(X, y) + + predict_results.append(neigh.predict(query)) + + for i in range(len(predict_results) - 1): + algorithm = ALGORITHMS[i] + next_algorithm = ALGORITHMS[i + 1] + + predictions, next_predictions = predict_results[i], predict_results[i + 1] + + assert_allclose( + predictions, + next_predictions, + err_msg=( + f"The '{algorithm}' and '{next_algorithm}' " + "algorithms return different predictions." 
+ ), + ) + + +@pytest.mark.parametrize( + "KNeighborsMixinSubclass", + [ + neighbors.KNeighborsClassifier, + neighbors.KNeighborsRegressor, + neighbors.NearestNeighbors, + ], +) +def test_unsupervised_inputs(global_dtype, KNeighborsMixinSubclass): + # Test unsupervised inputs for neighbors estimators + + X = rng.random_sample((10, 3)).astype(global_dtype, copy=False) + y = rng.randint(3, size=10) + nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1) + nbrs_fid.fit(X) + + dist1, ind1 = nbrs_fid.kneighbors(X) + + nbrs = KNeighborsMixinSubclass(n_neighbors=1) + + for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)): + nbrs.fit(data, y) + + dist2, ind2 = nbrs.kneighbors(X) + + assert_allclose(dist1, dist2) + assert_array_equal(ind1, ind2) + + +def test_not_fitted_error_gets_raised(): + X = [[1]] + neighbors_ = neighbors.NearestNeighbors() + with pytest.raises(NotFittedError): + neighbors_.kneighbors_graph(X) + with pytest.raises(NotFittedError): + neighbors_.radius_neighbors_graph(X) + + +@pytest.mark.filterwarnings("ignore:EfficiencyWarning") +def check_precomputed(make_train_test, estimators): + """Tests unsupervised NearestNeighbors with a distance matrix.""" + # Note: smaller samples may result in spurious test success + rng = np.random.RandomState(42) + X = rng.random_sample((10, 4)) + Y = rng.random_sample((3, 4)) + DXX, DYX = make_train_test(X, Y) + for method in [ + "kneighbors", + ]: + # TODO: also test radius_neighbors, but requires different assertion + + # As a feature matrix (n_samples by n_features) + nbrs_X = neighbors.NearestNeighbors(n_neighbors=3) + nbrs_X.fit(X) + dist_X, ind_X = getattr(nbrs_X, method)(Y) + + # As a dense distance matrix (n_samples by n_samples) + nbrs_D = neighbors.NearestNeighbors( + n_neighbors=3, algorithm="brute", metric="precomputed" + ) + nbrs_D.fit(DXX) + dist_D, ind_D = getattr(nbrs_D, method)(DYX) + assert_allclose(dist_X, dist_D) + assert_array_equal(ind_X, ind_D) + + # Check auto works too + nbrs_D = neighbors.NearestNeighbors( + n_neighbors=3, algorithm="auto", metric="precomputed" + ) + nbrs_D.fit(DXX) + dist_D, ind_D = getattr(nbrs_D, method)(DYX) + assert_allclose(dist_X, dist_D) + assert_array_equal(ind_X, ind_D) + + # Check X=None in prediction + dist_X, ind_X = getattr(nbrs_X, method)(None) + dist_D, ind_D = getattr(nbrs_D, method)(None) + assert_allclose(dist_X, dist_D) + assert_array_equal(ind_X, ind_D) + + # Must raise a ValueError if the matrix is not of correct shape + with pytest.raises(ValueError): + getattr(nbrs_D, method)(X) + + target = np.arange(X.shape[0]) + for Est in estimators: + est = Est(metric="euclidean") + est.radius = est.n_neighbors = 1 + pred_X = est.fit(X, target).predict(Y) + est.metric = "precomputed" + pred_D = est.fit(DXX, target).predict(DYX) + assert_allclose(pred_X, pred_D) + + +def test_precomputed_dense(): + def make_train_test(X_train, X_test): + return ( + metrics.pairwise_distances(X_train), + metrics.pairwise_distances(X_test, X_train), + ) + + estimators = [ + neighbors.KNeighborsClassifier, + neighbors.KNeighborsRegressor, + neighbors.RadiusNeighborsClassifier, + neighbors.RadiusNeighborsRegressor, + ] + check_precomputed(make_train_test, estimators) + + +@pytest.mark.parametrize("fmt", ["csr", "lil"]) +def test_precomputed_sparse_knn(fmt): + def make_train_test(X_train, X_test): + nn = neighbors.NearestNeighbors(n_neighbors=3 + 1).fit(X_train) + return ( + nn.kneighbors_graph(X_train, mode="distance").asformat(fmt), + nn.kneighbors_graph(X_test, mode="distance").asformat(fmt), + ) + + 
# We do not test RadiusNeighborsClassifier and RadiusNeighborsRegressor + # since the precomputed neighbors graph is built with k neighbors only. + estimators = [ + neighbors.KNeighborsClassifier, + neighbors.KNeighborsRegressor, + ] + check_precomputed(make_train_test, estimators) + + +@pytest.mark.parametrize("fmt", ["csr", "lil"]) +def test_precomputed_sparse_radius(fmt): + def make_train_test(X_train, X_test): + nn = neighbors.NearestNeighbors(radius=1).fit(X_train) + return ( + nn.radius_neighbors_graph(X_train, mode="distance").asformat(fmt), + nn.radius_neighbors_graph(X_test, mode="distance").asformat(fmt), + ) + + # We do not test KNeighborsClassifier and KNeighborsRegressor + # since the precomputed neighbors graph is built with a radius. + estimators = [ + neighbors.RadiusNeighborsClassifier, + neighbors.RadiusNeighborsRegressor, + ] + check_precomputed(make_train_test, estimators) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_is_sorted_by_data(csr_container): + # Test that _is_sorted_by_data works as expected. In CSR sparse matrix, + # entries in each row can be sorted by indices, by data, or unsorted. + # _is_sorted_by_data should return True when entries are sorted by data, + # and False in all other cases. + + # Test with sorted single row sparse array + X = csr_container(np.arange(10).reshape(1, 10)) + assert _is_sorted_by_data(X) + # Test with unsorted 1D array + X[0, 2] = 5 + assert not _is_sorted_by_data(X) + + # Test when the data is sorted in each sample, but not necessarily + # between samples + X = csr_container([[0, 1, 2], [3, 0, 0], [3, 4, 0], [1, 0, 2]]) + assert _is_sorted_by_data(X) + + # Test with duplicates entries in X.indptr + data, indices, indptr = [0, 4, 2, 2], [0, 1, 1, 1], [0, 2, 2, 4] + X = csr_container((data, indices, indptr), shape=(3, 3)) + assert _is_sorted_by_data(X) + + +@pytest.mark.filterwarnings("ignore:EfficiencyWarning") +@pytest.mark.parametrize("function", [sort_graph_by_row_values, _check_precomputed]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sort_graph_by_row_values(function, csr_container): + # Test that sort_graph_by_row_values returns a graph sorted by row values + X = csr_container(np.abs(np.random.RandomState(42).randn(10, 10))) + assert not _is_sorted_by_data(X) + Xt = function(X) + assert _is_sorted_by_data(Xt) + + # test with a different number of nonzero entries for each sample + mask = np.random.RandomState(42).randint(2, size=(10, 10)) + X = X.toarray() + X[mask == 1] = 0 + X = csr_container(X) + assert not _is_sorted_by_data(X) + Xt = function(X) + assert _is_sorted_by_data(Xt) + + +@pytest.mark.filterwarnings("ignore:EfficiencyWarning") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sort_graph_by_row_values_copy(csr_container): + # Test if the sorting is done inplace if X is CSR, so that Xt is X. 
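+    # ``.data is X.data`` below compares buffer identity: it is True only when
+    # the graph was sorted in place on the original CSR data array.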
+ X_ = csr_container(np.abs(np.random.RandomState(42).randn(10, 10))) + assert not _is_sorted_by_data(X_) + + # sort_graph_by_row_values is done inplace if copy=False + X = X_.copy() + assert sort_graph_by_row_values(X).data is X.data + + X = X_.copy() + assert sort_graph_by_row_values(X, copy=False).data is X.data + + X = X_.copy() + assert sort_graph_by_row_values(X, copy=True).data is not X.data + + # _check_precomputed is never done inplace + X = X_.copy() + assert _check_precomputed(X).data is not X.data + + # do not raise if X is not CSR and copy=True + sort_graph_by_row_values(X.tocsc(), copy=True) + + # raise if X is not CSR and copy=False + with pytest.raises(ValueError, match="Use copy=True to allow the conversion"): + sort_graph_by_row_values(X.tocsc(), copy=False) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sort_graph_by_row_values_warning(csr_container): + # Test that the parameter warn_when_not_sorted works as expected. + X = csr_container(np.abs(np.random.RandomState(42).randn(10, 10))) + assert not _is_sorted_by_data(X) + + # warning + with pytest.warns(EfficiencyWarning, match="was not sorted by row values"): + sort_graph_by_row_values(X, copy=True) + with pytest.warns(EfficiencyWarning, match="was not sorted by row values"): + sort_graph_by_row_values(X, copy=True, warn_when_not_sorted=True) + with pytest.warns(EfficiencyWarning, match="was not sorted by row values"): + _check_precomputed(X) + + # no warning + with warnings.catch_warnings(): + warnings.simplefilter("error") + sort_graph_by_row_values(X, copy=True, warn_when_not_sorted=False) + + +@pytest.mark.parametrize( + "sparse_container", DOK_CONTAINERS + BSR_CONTAINERS + DIA_CONTAINERS +) +def test_sort_graph_by_row_values_bad_sparse_format(sparse_container): + # Test that sort_graph_by_row_values and _check_precomputed error on bad formats + X = sparse_container(np.abs(np.random.RandomState(42).randn(10, 10))) + with pytest.raises(TypeError, match="format is not supported"): + sort_graph_by_row_values(X) + with pytest.raises(TypeError, match="format is not supported"): + _check_precomputed(X) + + +@pytest.mark.filterwarnings("ignore:EfficiencyWarning") +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_precomputed_sparse_invalid(csr_container): + dist = np.array([[0.0, 2.0, 1.0], [2.0, 0.0, 3.0], [1.0, 3.0, 0.0]]) + dist_csr = csr_container(dist) + neigh = neighbors.NearestNeighbors(n_neighbors=1, metric="precomputed") + neigh.fit(dist_csr) + neigh.kneighbors(None, n_neighbors=1) + neigh.kneighbors(np.array([[0.0, 0.0, 0.0]]), n_neighbors=2) + + # Ensures enough number of nearest neighbors + dist = np.array([[0.0, 2.0, 0.0], [2.0, 0.0, 3.0], [0.0, 3.0, 0.0]]) + dist_csr = csr_container(dist) + neigh.fit(dist_csr) + msg = "2 neighbors per samples are required, but some samples have only 1" + with pytest.raises(ValueError, match=msg): + neigh.kneighbors(None, n_neighbors=1) + + # Checks error with inconsistent distance matrix + dist = np.array([[5.0, 2.0, 1.0], [-2.0, 0.0, 3.0], [1.0, 3.0, 0.0]]) + dist_csr = csr_container(dist) + msg = "Negative values in data passed to precomputed distance matrix." 
+ with pytest.raises(ValueError, match=msg): + neigh.kneighbors(dist_csr, n_neighbors=1) + + +def test_precomputed_cross_validation(): + # Ensure array is split correctly + rng = np.random.RandomState(0) + X = rng.rand(20, 2) + D = pairwise_distances(X, metric="euclidean") + y = rng.randint(3, size=20) + for Est in ( + neighbors.KNeighborsClassifier, + neighbors.RadiusNeighborsClassifier, + neighbors.KNeighborsRegressor, + neighbors.RadiusNeighborsRegressor, + ): + metric_score = cross_val_score(Est(), X, y) + precomp_score = cross_val_score(Est(metric="precomputed"), D, y) + assert_array_equal(metric_score, precomp_score) + + +def test_unsupervised_radius_neighbors( + global_dtype, n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0 +): + # Test unsupervised radius-based query + rng = np.random.RandomState(random_state) + + X = rng.rand(n_samples, n_features).astype(global_dtype, copy=False) + + test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) + + for p in P: + results = [] + + for algorithm in ALGORITHMS: + neigh = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p) + neigh.fit(X) + + ind1 = neigh.radius_neighbors(test, return_distance=False) + + # sort the results: this is not done automatically for + # radius searches + dist, ind = neigh.radius_neighbors(test, return_distance=True) + for d, i, i1 in zip(dist, ind, ind1): + j = d.argsort() + d[:] = d[j] + i[:] = i[j] + i1[:] = i1[j] + results.append((dist, ind)) + + assert_allclose(np.concatenate(list(ind)), np.concatenate(list(ind1))) + + for i in range(len(results) - 1): + assert_allclose( + np.concatenate(list(results[i][0])), + np.concatenate(list(results[i + 1][0])), + ), + assert_allclose( + np.concatenate(list(results[i][1])), + np.concatenate(list(results[i + 1][1])), + ) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +@pytest.mark.parametrize("weights", WEIGHTS) +def test_kneighbors_classifier( + global_dtype, + algorithm, + weights, + n_samples=40, + n_features=5, + n_test_pts=10, + n_neighbors=5, + random_state=0, +): + # Test k-neighbors classification + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1 + y = ((X**2).sum(axis=1) < 0.5).astype(int) + y_str = y.astype(str) + + knn = neighbors.KNeighborsClassifier( + n_neighbors=n_neighbors, weights=weights, algorithm=algorithm + ) + knn.fit(X, y) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + y_pred = knn.predict(X[:n_test_pts] + epsilon) + assert_array_equal(y_pred, y[:n_test_pts]) + # Test prediction with y_str + knn.fit(X, y_str) + y_pred = knn.predict(X[:n_test_pts] + epsilon) + assert_array_equal(y_pred, y_str[:n_test_pts]) + + +def test_kneighbors_classifier_float_labels( + global_dtype, + n_samples=40, + n_features=5, + n_test_pts=10, + n_neighbors=5, + random_state=0, +): + # Test k-neighbors classification + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1 + y = ((X**2).sum(axis=1) < 0.5).astype(int) + + knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors) + knn.fit(X, y.astype(float)) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + y_pred = knn.predict(X[:n_test_pts] + epsilon) + assert_array_equal(y_pred, y[:n_test_pts]) + + +def test_kneighbors_classifier_predict_proba(global_dtype): + # Test KNeighborsClassifier.predict_proba() method + X = np.array( + [[0, 2, 0], [0, 2, 1], [2, 0, 0], [2, 2, 0], [0, 0, 2], [0, 0, 1]] + 
).astype(global_dtype, copy=False) + y = np.array([4, 4, 5, 5, 1, 1]) + cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist + cls.fit(X, y) + y_prob = cls.predict_proba(X) + real_prob = ( + np.array( + [ + [0, 2, 1], + [1, 2, 0], + [1, 0, 2], + [0, 1, 2], + [2, 1, 0], + [2, 1, 0], + ] + ) + / 3.0 + ) + assert_array_equal(real_prob, y_prob) + # Check that it also works with non integer labels + cls.fit(X, y.astype(str)) + y_prob = cls.predict_proba(X) + assert_array_equal(real_prob, y_prob) + # Check that it works with weights='distance' + cls = neighbors.KNeighborsClassifier(n_neighbors=2, p=1, weights="distance") + cls.fit(X, y) + y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]])) + real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]]) + assert_allclose(real_prob, y_prob) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +@pytest.mark.parametrize("weights", WEIGHTS) +def test_radius_neighbors_classifier( + global_dtype, + algorithm, + weights, + n_samples=40, + n_features=5, + n_test_pts=10, + radius=0.5, + random_state=0, +): + # Test radius-based classification + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1 + y = ((X**2).sum(axis=1) < radius).astype(int) + y_str = y.astype(str) + + neigh = neighbors.RadiusNeighborsClassifier( + radius=radius, weights=weights, algorithm=algorithm + ) + neigh.fit(X, y) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + y_pred = neigh.predict(X[:n_test_pts] + epsilon) + assert_array_equal(y_pred, y[:n_test_pts]) + neigh.fit(X, y_str) + y_pred = neigh.predict(X[:n_test_pts] + epsilon) + assert_array_equal(y_pred, y_str[:n_test_pts]) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +@pytest.mark.parametrize("weights", WEIGHTS) +@pytest.mark.parametrize("outlier_label", [0, -1, None]) +def test_radius_neighbors_classifier_when_no_neighbors( + global_dtype, algorithm, weights, outlier_label +): + # Test radius-based classifier when no neighbors found. + # In this case it should rise an informative exception + + X = np.array([[1.0, 1.0], [2.0, 2.0]], dtype=global_dtype) + y = np.array([1, 2]) + radius = 0.1 + + # no outliers + z1 = np.array([[1.01, 1.01], [2.01, 2.01]], dtype=global_dtype) + + # one outlier + z2 = np.array([[1.01, 1.01], [1.4, 1.4]], dtype=global_dtype) + + rnc = neighbors.RadiusNeighborsClassifier + clf = rnc( + radius=radius, + weights=weights, + algorithm=algorithm, + outlier_label=outlier_label, + ) + clf.fit(X, y) + assert_array_equal(np.array([1, 2]), clf.predict(z1)) + if outlier_label is None: + with pytest.raises(ValueError): + clf.predict(z2) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +@pytest.mark.parametrize("weights", WEIGHTS) +def test_radius_neighbors_classifier_outlier_labeling(global_dtype, algorithm, weights): + # Test radius-based classifier when no neighbors found and outliers + # are labeled. 
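+    # Queries with no training sample within ``radius`` should receive the
+    # configured ``outlier_label`` (-1 here) and an all-zero probability row.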
+ + X = np.array( + [[1.0, 1.0], [2.0, 2.0], [0.99, 0.99], [0.98, 0.98], [2.01, 2.01]], + dtype=global_dtype, + ) + y = np.array([1, 2, 1, 1, 2]) + radius = 0.1 + + # no outliers + z1 = np.array([[1.01, 1.01], [2.01, 2.01]], dtype=global_dtype) + + # one outlier + z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]], dtype=global_dtype) + + correct_labels1 = np.array([1, 2]) + correct_labels2 = np.array([-1, 1, 2]) + outlier_proba = np.array([0, 0]) + + clf = neighbors.RadiusNeighborsClassifier( + radius=radius, weights=weights, algorithm=algorithm, outlier_label=-1 + ) + clf.fit(X, y) + assert_array_equal(correct_labels1, clf.predict(z1)) + with pytest.warns(UserWarning, match="Outlier label -1 is not in training classes"): + assert_array_equal(correct_labels2, clf.predict(z2)) + with pytest.warns(UserWarning, match="Outlier label -1 is not in training classes"): + assert_allclose(outlier_proba, clf.predict_proba(z2)[0]) + + # test outlier_labeling of using predict_proba() + RNC = neighbors.RadiusNeighborsClassifier + X = np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]], dtype=global_dtype) + y = np.array([0, 2, 2, 1, 1, 1, 3, 3, 3, 3]) + + # test outlier_label scalar verification + def check_array_exception(): + clf = RNC(radius=1, outlier_label=[[5]]) + clf.fit(X, y) + + with pytest.raises(TypeError): + check_array_exception() + + # test invalid outlier_label dtype + def check_dtype_exception(): + clf = RNC(radius=1, outlier_label="a") + clf.fit(X, y) + + with pytest.raises(TypeError): + check_dtype_exception() + + # test most frequent + clf = RNC(radius=1, outlier_label="most_frequent") + clf.fit(X, y) + proba = clf.predict_proba([[1], [15]]) + assert_array_equal(proba[1, :], [0, 0, 0, 1]) + + # test manual label in y + clf = RNC(radius=1, outlier_label=1) + clf.fit(X, y) + proba = clf.predict_proba([[1], [15]]) + assert_array_equal(proba[1, :], [0, 1, 0, 0]) + pred = clf.predict([[1], [15]]) + assert_array_equal(pred, [2, 1]) + + # test manual label out of y warning + def check_warning(): + clf = RNC(radius=1, outlier_label=4) + clf.fit(X, y) + clf.predict_proba([[1], [15]]) + + with pytest.warns(UserWarning): + check_warning() + + # test multi output same outlier label + y_multi = [ + [0, 1], + [2, 1], + [2, 2], + [1, 2], + [1, 2], + [1, 3], + [3, 3], + [3, 3], + [3, 0], + [3, 0], + ] + clf = RNC(radius=1, outlier_label=1) + clf.fit(X, y_multi) + proba = clf.predict_proba([[7], [15]]) + assert_array_equal(proba[1][1, :], [0, 1, 0, 0]) + pred = clf.predict([[7], [15]]) + assert_array_equal(pred[1, :], [1, 1]) + + # test multi output different outlier label + y_multi = [ + [0, 0], + [2, 2], + [2, 2], + [1, 1], + [1, 1], + [1, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + ] + clf = RNC(radius=1, outlier_label=[0, 1]) + clf.fit(X, y_multi) + proba = clf.predict_proba([[7], [15]]) + assert_array_equal(proba[0][1, :], [1, 0, 0, 0]) + assert_array_equal(proba[1][1, :], [0, 1, 0, 0]) + pred = clf.predict([[7], [15]]) + assert_array_equal(pred[1, :], [0, 1]) + + # test inconsistent outlier label list length + def check_exception(): + clf = RNC(radius=1, outlier_label=[0, 1, 2]) + clf.fit(X, y_multi) + + with pytest.raises(ValueError): + check_exception() + + +def test_radius_neighbors_classifier_zero_distance(): + # Test radius-based classifier, when distance to a sample is zero. 
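+    # A query landing exactly on a training sample has a zero distance; the
+    # custom ``weight_func`` then produces ``np.inf`` weights, which prediction
+    # must still handle (hence the ``np.errstate`` guard below).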
+ + X = np.array([[1.0, 1.0], [2.0, 2.0]]) + y = np.array([1, 2]) + radius = 0.1 + + z1 = np.array([[1.01, 1.01], [2.0, 2.0]]) + correct_labels1 = np.array([1, 2]) + + weight_func = _weight_func + + for algorithm in ALGORITHMS: + for weights in ["uniform", "distance", weight_func]: + clf = neighbors.RadiusNeighborsClassifier( + radius=radius, weights=weights, algorithm=algorithm + ) + clf.fit(X, y) + with np.errstate(invalid="ignore"): + # Ignore the warning raised in _weight_func when making + # predictions with null distances resulting in np.inf values. + assert_array_equal(correct_labels1, clf.predict(z1)) + + +def test_neighbors_regressors_zero_distance(): + # Test radius-based regressor, when distance to a sample is zero. + + X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]]) + y = np.array([1.0, 1.5, 2.0, 0.0]) + radius = 0.2 + z = np.array([[1.1, 1.1], [2.0, 2.0]]) + + rnn_correct_labels = np.array([1.25, 2.0]) + + knn_correct_unif = np.array([1.25, 1.0]) + knn_correct_dist = np.array([1.25, 2.0]) + + for algorithm in ALGORITHMS: + # we don't test for weights=_weight_func since user will be expected + # to handle zero distances themselves in the function. + for weights in ["uniform", "distance"]: + rnn = neighbors.RadiusNeighborsRegressor( + radius=radius, weights=weights, algorithm=algorithm + ) + rnn.fit(X, y) + assert_allclose(rnn_correct_labels, rnn.predict(z)) + + for weights, corr_labels in zip( + ["uniform", "distance"], [knn_correct_unif, knn_correct_dist] + ): + knn = neighbors.KNeighborsRegressor( + n_neighbors=2, weights=weights, algorithm=algorithm + ) + knn.fit(X, y) + assert_allclose(corr_labels, knn.predict(z)) + + +def test_radius_neighbors_boundary_handling(): + """Test whether points lying on boundary are handled consistently + + Also ensures that even with only one query point, an object array + is returned rather than a 2d array. 
+ """ + + X = np.array([[1.5], [3.0], [3.01]]) + radius = 3.0 + + for algorithm in ALGORITHMS: + nbrs = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm).fit(X) + results = nbrs.radius_neighbors([[0.0]], return_distance=False) + assert results.shape == (1,) + assert results.dtype == object + assert_array_equal(results[0], [0, 1]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_radius_neighbors_returns_array_of_objects(csr_container): + # check that we can pass precomputed distances to + # NearestNeighbors.radius_neighbors() + # non-regression test for + # https://github.com/scikit-learn/scikit-learn/issues/16036 + X = csr_container(np.ones((4, 4))) + X.setdiag([0, 0, 0, 0]) + + nbrs = neighbors.NearestNeighbors( + radius=0.5, algorithm="auto", leaf_size=30, metric="precomputed" + ).fit(X) + neigh_dist, neigh_ind = nbrs.radius_neighbors(X, return_distance=True) + + expected_dist = np.empty(X.shape[0], dtype=object) + expected_dist[:] = [np.array([0]), np.array([0]), np.array([0]), np.array([0])] + expected_ind = np.empty(X.shape[0], dtype=object) + expected_ind[:] = [np.array([0]), np.array([1]), np.array([2]), np.array([3])] + + assert_array_equal(neigh_dist, expected_dist) + assert_array_equal(neigh_ind, expected_ind) + + +@pytest.mark.parametrize("algorithm", ["ball_tree", "kd_tree", "brute"]) +def test_query_equidistant_kth_nn(algorithm): + # For several candidates for the k-th nearest neighbor position, + # the first candidate should be chosen + query_point = np.array([[0, 0]]) + equidistant_points = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]]) + # The 3rd and 4th points should not replace the 2nd point + # for the 2th nearest neighbor position + k = 2 + knn_indices = np.array([[0, 1]]) + nn = neighbors.NearestNeighbors(algorithm=algorithm).fit(equidistant_points) + indices = np.sort(nn.kneighbors(query_point, n_neighbors=k, return_distance=False)) + assert_array_equal(indices, knn_indices) + + +@pytest.mark.parametrize( + ["algorithm", "metric"], + list( + product( + ("kd_tree", "ball_tree", "brute"), + ("euclidean", *DISTANCE_METRIC_OBJS), + ) + ) + + [ + ("brute", "euclidean"), + ("brute", "precomputed"), + ], +) +def test_radius_neighbors_sort_results(algorithm, metric): + # Test radius_neighbors[_graph] output when sort_result is True + + metric = _parse_metric(metric, np.float64) + if isinstance(metric, DistanceMetric): + pytest.skip( + "Metrics of type `DistanceMetric` are not yet supported for radius-neighbor" + " estimators." 
+ ) + n_samples = 10 + rng = np.random.RandomState(42) + X = rng.random_sample((n_samples, 4)) + + if metric == "precomputed": + X = neighbors.radius_neighbors_graph(X, radius=np.inf, mode="distance") + model = neighbors.NearestNeighbors(algorithm=algorithm, metric=metric) + model.fit(X) + + # self.radius_neighbors + distances, indices = model.radius_neighbors(X=X, radius=np.inf, sort_results=True) + for ii in range(n_samples): + assert_array_equal(distances[ii], np.sort(distances[ii])) + + # sort_results=True and return_distance=False + if metric != "precomputed": # no need to raise with precomputed graph + with pytest.raises(ValueError, match="return_distance must be True"): + model.radius_neighbors( + X=X, radius=np.inf, sort_results=True, return_distance=False + ) + + # self.radius_neighbors_graph + graph = model.radius_neighbors_graph( + X=X, radius=np.inf, mode="distance", sort_results=True + ) + assert _is_sorted_by_data(graph) + + +def test_RadiusNeighborsClassifier_multioutput(): + # Test k-NN classifier on multioutput data + rng = check_random_state(0) + n_features = 2 + n_samples = 40 + n_output = 3 + + X = rng.rand(n_samples, n_features) + y = rng.randint(0, 3, (n_samples, n_output)) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + weights = [None, "uniform", "distance", _weight_func] + + for algorithm, weights in product(ALGORITHMS, weights): + # Stack single output prediction + y_pred_so = [] + for o in range(n_output): + rnn = neighbors.RadiusNeighborsClassifier( + weights=weights, algorithm=algorithm + ) + rnn.fit(X_train, y_train[:, o]) + y_pred_so.append(rnn.predict(X_test)) + + y_pred_so = np.vstack(y_pred_so).T + assert y_pred_so.shape == y_test.shape + + # Multioutput prediction + rnn_mo = neighbors.RadiusNeighborsClassifier( + weights=weights, algorithm=algorithm + ) + rnn_mo.fit(X_train, y_train) + y_pred_mo = rnn_mo.predict(X_test) + + assert y_pred_mo.shape == y_test.shape + assert_array_equal(y_pred_mo, y_pred_so) + + +def test_kneighbors_classifier_sparse( + n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0 +): + # Test k-NN classifier on sparse matrices + # Like the above, but with various types of sparse matrices + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features) - 1 + X *= X > 0.2 + y = ((X**2).sum(axis=1) < 0.5).astype(int) + + for sparsemat in SPARSE_TYPES: + knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm="auto") + knn.fit(sparsemat(X), y) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + for sparsev in SPARSE_TYPES + (np.asarray,): + X_eps = sparsev(X[:n_test_pts] + epsilon) + y_pred = knn.predict(X_eps) + assert_array_equal(y_pred, y[:n_test_pts]) + + +def test_KNeighborsClassifier_multioutput(): + # Test k-NN classifier on multioutput data + rng = check_random_state(0) + n_features = 5 + n_samples = 50 + n_output = 3 + + X = rng.rand(n_samples, n_features) + y = rng.randint(0, 3, (n_samples, n_output)) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + weights = [None, "uniform", "distance", _weight_func] + + for algorithm, weights in product(ALGORITHMS, weights): + # Stack single output prediction + y_pred_so = [] + y_pred_proba_so = [] + for o in range(n_output): + knn = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm) + knn.fit(X_train, y_train[:, o]) + y_pred_so.append(knn.predict(X_test)) + y_pred_proba_so.append(knn.predict_proba(X_test)) + + y_pred_so = np.vstack(y_pred_so).T + 
assert y_pred_so.shape == y_test.shape + assert len(y_pred_proba_so) == n_output + + # Multioutput prediction + knn_mo = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm) + knn_mo.fit(X_train, y_train) + y_pred_mo = knn_mo.predict(X_test) + + assert y_pred_mo.shape == y_test.shape + assert_array_equal(y_pred_mo, y_pred_so) + + # Check proba + y_pred_proba_mo = knn_mo.predict_proba(X_test) + assert len(y_pred_proba_mo) == n_output + + for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so): + assert_array_equal(proba_mo, proba_so) + + +def test_kneighbors_regressor( + n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0 +): + # Test k-neighbors regression + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features) - 1 + y = np.sqrt((X**2).sum(1)) + y /= y.max() + + y_target = y[:n_test_pts] + + weight_func = _weight_func + + for algorithm in ALGORITHMS: + for weights in ["uniform", "distance", weight_func]: + knn = neighbors.KNeighborsRegressor( + n_neighbors=n_neighbors, weights=weights, algorithm=algorithm + ) + knn.fit(X, y) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + y_pred = knn.predict(X[:n_test_pts] + epsilon) + assert np.all(abs(y_pred - y_target) < 0.3) + + +def test_KNeighborsRegressor_multioutput_uniform_weight(): + # Test k-neighbors in multi-output regression with uniform weight + rng = check_random_state(0) + n_features = 5 + n_samples = 40 + n_output = 4 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples, n_output) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + for algorithm, weights in product(ALGORITHMS, [None, "uniform"]): + knn = neighbors.KNeighborsRegressor(weights=weights, algorithm=algorithm) + knn.fit(X_train, y_train) + + neigh_idx = knn.kneighbors(X_test, return_distance=False) + y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx]) + + y_pred = knn.predict(X_test) + + assert y_pred.shape == y_test.shape + assert y_pred_idx.shape == y_test.shape + assert_allclose(y_pred, y_pred_idx) + + +def test_kneighbors_regressor_multioutput( + n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0 +): + # Test k-neighbors in multi-output regression + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features) - 1 + y = np.sqrt((X**2).sum(1)) + y /= y.max() + y = np.vstack([y, y]).T + + y_target = y[:n_test_pts] + + weights = ["uniform", "distance", _weight_func] + for algorithm, weights in product(ALGORITHMS, weights): + knn = neighbors.KNeighborsRegressor( + n_neighbors=n_neighbors, weights=weights, algorithm=algorithm + ) + knn.fit(X, y) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + y_pred = knn.predict(X[:n_test_pts] + epsilon) + assert y_pred.shape == y_target.shape + + assert np.all(np.abs(y_pred - y_target) < 0.3) + + +def test_radius_neighbors_regressor( + n_samples=40, n_features=3, n_test_pts=10, radius=0.5, random_state=0 +): + # Test radius-based neighbors regression + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features) - 1 + y = np.sqrt((X**2).sum(1)) + y /= y.max() + + y_target = y[:n_test_pts] + + weight_func = _weight_func + + for algorithm in ALGORITHMS: + for weights in ["uniform", "distance", weight_func]: + neigh = neighbors.RadiusNeighborsRegressor( + radius=radius, weights=weights, algorithm=algorithm + ) + neigh.fit(X, y) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + y_pred = neigh.predict(X[:n_test_pts] + epsilon) 
+ assert np.all(abs(y_pred - y_target) < radius / 2) + + # test that nan is returned when no nearby observations + for weights in ["uniform", "distance"]: + neigh = neighbors.RadiusNeighborsRegressor( + radius=radius, weights=weights, algorithm="auto" + ) + neigh.fit(X, y) + X_test_nan = np.full((1, n_features), -1.0) + empty_warning_msg = ( + "One or more samples have no neighbors " + "within specified radius; predicting NaN." + ) + with pytest.warns(UserWarning, match=re.escape(empty_warning_msg)): + pred = neigh.predict(X_test_nan) + assert np.all(np.isnan(pred)) + + +def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight(): + # Test radius neighbors in multi-output regression (uniform weight) + + rng = check_random_state(0) + n_features = 5 + n_samples = 40 + n_output = 4 + + X = rng.rand(n_samples, n_features) + y = rng.rand(n_samples, n_output) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + for algorithm, weights in product(ALGORITHMS, [None, "uniform"]): + rnn = neighbors.RadiusNeighborsRegressor(weights=weights, algorithm=algorithm) + rnn.fit(X_train, y_train) + + neigh_idx = rnn.radius_neighbors(X_test, return_distance=False) + y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx]) + + y_pred_idx = np.array(y_pred_idx) + y_pred = rnn.predict(X_test) + + assert y_pred_idx.shape == y_test.shape + assert y_pred.shape == y_test.shape + assert_allclose(y_pred, y_pred_idx) + + +def test_RadiusNeighborsRegressor_multioutput( + n_samples=40, n_features=5, n_test_pts=10, random_state=0 +): + # Test k-neighbors in multi-output regression with various weight + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features) - 1 + y = np.sqrt((X**2).sum(1)) + y /= y.max() + y = np.vstack([y, y]).T + + y_target = y[:n_test_pts] + weights = ["uniform", "distance", _weight_func] + + for algorithm, weights in product(ALGORITHMS, weights): + rnn = neighbors.RadiusNeighborsRegressor(weights=weights, algorithm=algorithm) + rnn.fit(X, y) + epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) + y_pred = rnn.predict(X[:n_test_pts] + epsilon) + + assert y_pred.shape == y_target.shape + assert np.all(np.abs(y_pred - y_target) < 0.3) + + +@pytest.mark.filterwarnings("ignore:EfficiencyWarning") +def test_kneighbors_regressor_sparse( + n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0 +): + # Test radius-based regression on sparse matrices + # Like the above, but with various types of sparse matrices + rng = np.random.RandomState(random_state) + X = 2 * rng.rand(n_samples, n_features) - 1 + y = ((X**2).sum(axis=1) < 0.25).astype(int) + + for sparsemat in SPARSE_TYPES: + knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, algorithm="auto") + knn.fit(sparsemat(X), y) + + knn_pre = neighbors.KNeighborsRegressor( + n_neighbors=n_neighbors, metric="precomputed" + ) + knn_pre.fit(pairwise_distances(X, metric="euclidean"), y) + + for sparsev in SPARSE_OR_DENSE: + X2 = sparsev(X) + assert np.mean(knn.predict(X2).round() == y) > 0.95 + + X2_pre = sparsev(pairwise_distances(X, metric="euclidean")) + if sparsev in DOK_CONTAINERS + BSR_CONTAINERS: + msg = "not supported due to its handling of explicit zeros" + with pytest.raises(TypeError, match=msg): + knn_pre.predict(X2_pre) + else: + assert np.mean(knn_pre.predict(X2_pre).round() == y) > 0.95 + + +def test_neighbors_iris(): + # Sanity checks on the iris dataset + # Puts three points of each label in the plane and performs a + # nearest neighbor query on 
points near the decision boundary. + + for algorithm in ALGORITHMS: + clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm=algorithm) + clf.fit(iris.data, iris.target) + assert_array_equal(clf.predict(iris.data), iris.target) + + clf.set_params(n_neighbors=9, algorithm=algorithm) + clf.fit(iris.data, iris.target) + assert np.mean(clf.predict(iris.data) == iris.target) > 0.95 + + rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm) + rgs.fit(iris.data, iris.target) + assert np.mean(rgs.predict(iris.data).round() == iris.target) > 0.95 + + +def test_neighbors_digits(): + # Sanity check on the digits dataset + # the 'brute' algorithm has been observed to fail if the input + # dtype is uint8 due to overflow in distance calculations. + + X = digits.data.astype("uint8") + Y = digits.target + (n_samples, n_features) = X.shape + train_test_boundary = int(n_samples * 0.8) + train = np.arange(0, train_test_boundary) + test = np.arange(train_test_boundary, n_samples) + (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test] + + clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm="brute") + score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test) + score_float = clf.fit(X_train.astype(float, copy=False), Y_train).score( + X_test.astype(float, copy=False), Y_test + ) + assert score_uint8 == score_float + + +def test_kneighbors_graph(): + # Test kneighbors_graph to build the k-Nearest Neighbor graph. + X = np.array([[0, 1], [1.01, 1.0], [2, 0]]) + + # n_neighbors = 1 + A = neighbors.kneighbors_graph(X, 1, mode="connectivity", include_self=True) + assert_array_equal(A.toarray(), np.eye(A.shape[0])) + + A = neighbors.kneighbors_graph(X, 1, mode="distance") + assert_allclose( + A.toarray(), [[0.00, 1.01, 0.0], [1.01, 0.0, 0.0], [0.00, 1.40716026, 0.0]] + ) + + # n_neighbors = 2 + A = neighbors.kneighbors_graph(X, 2, mode="connectivity", include_self=True) + assert_array_equal(A.toarray(), [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 1.0]]) + + A = neighbors.kneighbors_graph(X, 2, mode="distance") + assert_allclose( + A.toarray(), + [ + [0.0, 1.01, 2.23606798], + [1.01, 0.0, 1.40716026], + [2.23606798, 1.40716026, 0.0], + ], + ) + + # n_neighbors = 3 + A = neighbors.kneighbors_graph(X, 3, mode="connectivity", include_self=True) + assert_allclose(A.toarray(), [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) + + +@pytest.mark.parametrize("n_neighbors", [1, 2, 3]) +@pytest.mark.parametrize("mode", ["connectivity", "distance"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_kneighbors_graph_sparse(n_neighbors, mode, csr_container, seed=36): + # Test kneighbors_graph to build the k-Nearest Neighbor graph + # for sparse input. + rng = np.random.RandomState(seed) + X = rng.randn(10, 10) + Xcsr = csr_container(X) + + assert_allclose( + neighbors.kneighbors_graph(X, n_neighbors, mode=mode).toarray(), + neighbors.kneighbors_graph(Xcsr, n_neighbors, mode=mode).toarray(), + ) + + +def test_radius_neighbors_graph(): + # Test radius_neighbors_graph to build the Nearest Neighbor graph. 
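+    # With radius=1.5 the only pair farther apart than the radius is points
+    # 0 and 2 (distance sqrt(5) ~ 2.24), which is why the (0, 2) entries are
+    # zero in the connectivity matrix checked below.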
+ X = np.array([[0, 1], [1.01, 1.0], [2, 0]]) + + A = neighbors.radius_neighbors_graph(X, 1.5, mode="connectivity", include_self=True) + assert_array_equal(A.toarray(), [[1.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0]]) + + A = neighbors.radius_neighbors_graph(X, 1.5, mode="distance") + assert_allclose( + A.toarray(), [[0.0, 1.01, 0.0], [1.01, 0.0, 1.40716026], [0.0, 1.40716026, 0.0]] + ) + + +@pytest.mark.parametrize("n_neighbors", [1, 2, 3]) +@pytest.mark.parametrize("mode", ["connectivity", "distance"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_radius_neighbors_graph_sparse(n_neighbors, mode, csr_container, seed=36): + # Test radius_neighbors_graph to build the Nearest Neighbor graph + # for sparse input. + rng = np.random.RandomState(seed) + X = rng.randn(10, 10) + Xcsr = csr_container(X) + + assert_allclose( + neighbors.radius_neighbors_graph(X, n_neighbors, mode=mode).toarray(), + neighbors.radius_neighbors_graph(Xcsr, n_neighbors, mode=mode).toarray(), + ) + + +@pytest.mark.parametrize( + "Estimator", + [ + neighbors.KNeighborsClassifier, + neighbors.RadiusNeighborsClassifier, + neighbors.KNeighborsRegressor, + neighbors.RadiusNeighborsRegressor, + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_neighbors_validate_parameters(Estimator, csr_container): + """Additional parameter validation for *Neighbors* estimators not covered by common + validation.""" + X = rng.random_sample((10, 2)) + Xsparse = csr_container(X) + X3 = rng.random_sample((10, 3)) + y = np.ones(10) + + nbrs = Estimator(algorithm="ball_tree", metric="haversine") + msg = "instance is not fitted yet" + with pytest.raises(ValueError, match=msg): + nbrs.predict(X) + msg = "Metric 'haversine' not valid for sparse input." + with pytest.raises(ValueError, match=msg): + ignore_warnings(nbrs.fit(Xsparse, y)) + + nbrs = Estimator(metric="haversine", algorithm="brute") + nbrs.fit(X3, y) + msg = "Haversine distance only valid in 2 dimensions" + with pytest.raises(ValueError, match=msg): + nbrs.predict(X3) + + nbrs = Estimator() + msg = re.escape("Found array with 0 sample(s)") + with pytest.raises(ValueError, match=msg): + nbrs.fit(np.ones((0, 2)), np.ones(0)) + + msg = "Found array with dim 3" + with pytest.raises(ValueError, match=msg): + nbrs.fit(X[:, :, None], y) + nbrs.fit(X, y) + + msg = re.escape("Found array with 0 feature(s)") + with pytest.raises(ValueError, match=msg): + nbrs.predict([[]]) + + +@pytest.mark.parametrize( + "Estimator", + [ + neighbors.KNeighborsClassifier, + neighbors.RadiusNeighborsClassifier, + neighbors.KNeighborsRegressor, + neighbors.RadiusNeighborsRegressor, + ], +) +@pytest.mark.parametrize("n_features", [2, 100]) +@pytest.mark.parametrize("algorithm", ["auto", "brute"]) +def test_neighbors_minkowski_semimetric_algo_warn(Estimator, n_features, algorithm): + """ + Validation of all classes extending NeighborsBase with + Minkowski semi-metrics (i.e. when 0 < p < 1). That proper + Warning is raised for `algorithm="auto"` and "brute". + """ + X = rng.random_sample((10, n_features)) + y = np.ones(10) + + model = Estimator(p=0.1, algorithm=algorithm) + msg = ( + "Mind that for 0 < p < 1, Minkowski metrics are not distance" + " metrics. Continuing the execution with `algorithm='brute'`." 
+ ) + with pytest.warns(UserWarning, match=msg): + model.fit(X, y) + + assert model._fit_method == "brute" + + +@pytest.mark.parametrize( + "Estimator", + [ + neighbors.KNeighborsClassifier, + neighbors.RadiusNeighborsClassifier, + neighbors.KNeighborsRegressor, + neighbors.RadiusNeighborsRegressor, + ], +) +@pytest.mark.parametrize("n_features", [2, 100]) +@pytest.mark.parametrize("algorithm", ["kd_tree", "ball_tree"]) +def test_neighbors_minkowski_semimetric_algo_error(Estimator, n_features, algorithm): + """Check that we raise a proper error if `algorithm!='brute'` and `p<1`.""" + X = rng.random_sample((10, 2)) + y = np.ones(10) + + model = Estimator(algorithm=algorithm, p=0.1) + msg = ( + f'algorithm="{algorithm}" does not support 0 < p < 1 for ' + "the Minkowski metric. To resolve this problem either " + 'set p >= 1 or algorithm="brute".' + ) + with pytest.raises(ValueError, match=msg): + model.fit(X, y) + + +# TODO: remove when NearestNeighbors methods uses parameter validation mechanism +def test_nearest_neighbors_validate_params(): + """Validate parameter of NearestNeighbors.""" + X = rng.random_sample((10, 2)) + + nbrs = neighbors.NearestNeighbors().fit(X) + msg = ( + 'Unsupported mode, must be one of "connectivity", or "distance" but got "blah"' + " instead" + ) + with pytest.raises(ValueError, match=msg): + nbrs.kneighbors_graph(X, mode="blah") + with pytest.raises(ValueError, match=msg): + nbrs.radius_neighbors_graph(X, mode="blah") + + +@pytest.mark.parametrize( + "metric", + sorted( + set(neighbors.VALID_METRICS["ball_tree"]).intersection( + neighbors.VALID_METRICS["brute"] + ) + - set(["pyfunc", *BOOL_METRICS]) + ) + + DISTANCE_METRIC_OBJS, +) +def test_neighbors_metrics( + global_dtype, metric, n_samples=20, n_features=3, n_query_pts=2, n_neighbors=5 +): + metric = _parse_metric(metric, global_dtype) + + # Test computing the neighbors for various metrics + algorithms = ["brute", "ball_tree", "kd_tree"] + X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False) + X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) + + metric_params_list = _generate_test_params_for(metric, n_features) + + for metric_params in metric_params_list: + # Some metric (e.g. Weighted minkowski) are not supported by KDTree + exclude_kd_tree = ( + False + if isinstance(metric, DistanceMetric) + else metric not in neighbors.VALID_METRICS["kd_tree"] + or ("minkowski" in metric and "w" in metric_params) + ) + results = {} + p = metric_params.pop("p", 2) + for algorithm in algorithms: + if isinstance(metric, DistanceMetric) and global_dtype == np.float32: + if "tree" in algorithm: # pragma: nocover + pytest.skip( + "Neither KDTree nor BallTree support 32-bit distance metric" + " objects." 
+ ) + neigh = neighbors.NearestNeighbors( + n_neighbors=n_neighbors, + algorithm=algorithm, + metric=metric, + p=p, + metric_params=metric_params, + ) + + if exclude_kd_tree and algorithm == "kd_tree": + with pytest.raises(ValueError): + neigh.fit(X_train) + continue + + # Haversine distance only accepts 2D data + if metric == "haversine": + feature_sl = slice(None, 2) + X_train = np.ascontiguousarray(X_train[:, feature_sl]) + X_test = np.ascontiguousarray(X_test[:, feature_sl]) + + neigh.fit(X_train) + results[algorithm] = neigh.kneighbors(X_test, return_distance=True) + + brute_dst, brute_idx = results["brute"] + ball_tree_dst, ball_tree_idx = results["ball_tree"] + + assert_allclose(brute_dst, ball_tree_dst) + assert_array_equal(brute_idx, ball_tree_idx) + + if not exclude_kd_tree: + kd_tree_dst, kd_tree_idx = results["kd_tree"] + assert_allclose(brute_dst, kd_tree_dst) + assert_array_equal(brute_idx, kd_tree_idx) + + assert_allclose(ball_tree_dst, kd_tree_dst) + assert_array_equal(ball_tree_idx, kd_tree_idx) + + +@pytest.mark.parametrize( + "metric", sorted(set(neighbors.VALID_METRICS["brute"]) - set(["precomputed"])) +) +def test_kneighbors_brute_backend( + metric, + global_dtype, + global_random_seed, + n_samples=2000, + n_features=30, + n_query_pts=5, + n_neighbors=5, +): + rng = np.random.RandomState(global_random_seed) + # Both backend for the 'brute' algorithm of kneighbors must give identical results. + X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False) + X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) + + # Haversine distance only accepts 2D data + if metric == "haversine": + feature_sl = slice(None, 2) + X_train = np.ascontiguousarray(X_train[:, feature_sl]) + X_test = np.ascontiguousarray(X_test[:, feature_sl]) + + if metric in PAIRWISE_BOOLEAN_FUNCTIONS: + X_train = X_train > 0.5 + X_test = X_test > 0.5 + + metric_params_list = _generate_test_params_for(metric, n_features) + + for metric_params in metric_params_list: + p = metric_params.pop("p", 2) + + neigh = neighbors.NearestNeighbors( + n_neighbors=n_neighbors, + algorithm="brute", + metric=metric, + p=p, + metric_params=metric_params, + ) + + neigh.fit(X_train) + + with config_context(enable_cython_pairwise_dist=False): + # Use the legacy backend for brute + legacy_brute_dst, legacy_brute_idx = neigh.kneighbors( + X_test, return_distance=True + ) + with config_context(enable_cython_pairwise_dist=True): + # Use the pairwise-distances reduction backend for brute + pdr_brute_dst, pdr_brute_idx = neigh.kneighbors( + X_test, return_distance=True + ) + + assert_compatible_argkmin_results( + legacy_brute_dst, pdr_brute_dst, legacy_brute_idx, pdr_brute_idx + ) + + +def test_callable_metric(): + def custom_metric(x1, x2): + return np.sqrt(np.sum(x1**2 + x2**2)) + + X = np.random.RandomState(42).rand(20, 2) + nbrs1 = neighbors.NearestNeighbors( + n_neighbors=3, algorithm="auto", metric=custom_metric + ) + nbrs2 = neighbors.NearestNeighbors( + n_neighbors=3, algorithm="brute", metric=custom_metric + ) + + nbrs1.fit(X) + nbrs2.fit(X) + + dist1, ind1 = nbrs1.kneighbors(X) + dist2, ind2 = nbrs2.kneighbors(X) + + assert_allclose(dist1, dist2) + + +@pytest.mark.parametrize( + "metric", neighbors.VALID_METRICS["brute"] + DISTANCE_METRIC_OBJS +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_valid_brute_metric_for_auto_algorithm( + global_dtype, metric, csr_container, n_samples=20, n_features=12 +): + metric = _parse_metric(metric, global_dtype) + + X = 
rng.rand(n_samples, n_features).astype(global_dtype, copy=False) + Xcsr = csr_container(X) + + metric_params_list = _generate_test_params_for(metric, n_features) + + if metric == "precomputed": + X_precomputed = rng.random_sample((10, 4)) + Y_precomputed = rng.random_sample((3, 4)) + DXX = metrics.pairwise_distances(X_precomputed, metric="euclidean") + DYX = metrics.pairwise_distances( + Y_precomputed, X_precomputed, metric="euclidean" + ) + nb_p = neighbors.NearestNeighbors(n_neighbors=3, metric="precomputed") + nb_p.fit(DXX) + nb_p.kneighbors(DYX) + + else: + for metric_params in metric_params_list: + nn = neighbors.NearestNeighbors( + n_neighbors=3, + algorithm="auto", + metric=metric, + metric_params=metric_params, + ) + # Haversine distance only accepts 2D data + if metric == "haversine": + feature_sl = slice(None, 2) + X = np.ascontiguousarray(X[:, feature_sl]) + + nn.fit(X) + nn.kneighbors(X) + + if metric in VALID_METRICS_SPARSE["brute"]: + nn = neighbors.NearestNeighbors( + n_neighbors=3, algorithm="auto", metric=metric + ).fit(Xcsr) + nn.kneighbors(Xcsr) + + +def test_metric_params_interface(): + X = rng.rand(5, 5) + y = rng.randint(0, 2, 5) + est = neighbors.KNeighborsClassifier(metric_params={"p": 3}) + with pytest.warns(SyntaxWarning): + est.fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_predict_sparse_ball_kd_tree(csr_container): + rng = np.random.RandomState(0) + X = rng.rand(5, 5) + y = rng.randint(0, 2, 5) + nbrs1 = neighbors.KNeighborsClassifier(1, algorithm="kd_tree") + nbrs2 = neighbors.KNeighborsRegressor(1, algorithm="ball_tree") + for model in [nbrs1, nbrs2]: + model.fit(X, y) + with pytest.raises(ValueError): + model.predict(csr_container(X)) + + +def test_non_euclidean_kneighbors(): + rng = np.random.RandomState(0) + X = rng.rand(5, 5) + + # Find a reasonable radius. + dist_array = pairwise_distances(X).flatten() + np.sort(dist_array) + radius = dist_array[15] + + # Test kneighbors_graph + for metric in ["manhattan", "chebyshev"]: + nbrs_graph = neighbors.kneighbors_graph( + X, 3, metric=metric, mode="connectivity", include_self=True + ).toarray() + nbrs1 = neighbors.NearestNeighbors(n_neighbors=3, metric=metric).fit(X) + assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray()) + + # Test radiusneighbors_graph + for metric in ["manhattan", "chebyshev"]: + nbrs_graph = neighbors.radius_neighbors_graph( + X, radius, metric=metric, mode="connectivity", include_self=True + ).toarray() + nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X) + assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).toarray()) + + # Raise error when wrong parameters are supplied, + X_nbrs = neighbors.NearestNeighbors(n_neighbors=3, metric="manhattan") + X_nbrs.fit(X) + with pytest.raises(ValueError): + neighbors.kneighbors_graph(X_nbrs, 3, metric="euclidean") + X_nbrs = neighbors.NearestNeighbors(radius=radius, metric="manhattan") + X_nbrs.fit(X) + with pytest.raises(ValueError): + neighbors.radius_neighbors_graph(X_nbrs, radius, metric="euclidean") + + +def check_object_arrays(nparray, list_check): + for ind, ele in enumerate(nparray): + assert_array_equal(ele, list_check[ind]) + + +def test_k_and_radius_neighbors_train_is_not_query(): + # Test kneighbors et.al when query is not training data + + for algorithm in ALGORITHMS: + nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm) + + X = [[0], [1]] + nn.fit(X) + test_data = [[2], [1]] + + # Test neighbors. 
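+ # Training points are 0 and 1 while the queries are 2 and 1, so the nearest
+ # training neighbor of the first query is point 1 at distance 1, and the
+ # second query coincides with training point 1 (distance 0). With radius 1.5,
+ # only point 1 is within range of the first query, while both training points
+ # are within range of the second.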
+ dist, ind = nn.kneighbors(test_data) + assert_array_equal(dist, [[1], [0]]) + assert_array_equal(ind, [[1], [1]]) + dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5) + check_object_arrays(dist, [[1], [1, 0]]) + check_object_arrays(ind, [[1], [0, 1]]) + + # Test the graph variants. + assert_array_equal( + nn.kneighbors_graph(test_data).toarray(), [[0.0, 1.0], [0.0, 1.0]] + ) + assert_array_equal( + nn.kneighbors_graph([[2], [1]], mode="distance").toarray(), + np.array([[0.0, 1.0], [0.0, 0.0]]), + ) + rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5) + assert_array_equal(rng.toarray(), [[0, 1], [1, 1]]) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +def test_k_and_radius_neighbors_X_None(algorithm): + # Test kneighbors et.al when query is None + nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm) + + X = [[0], [1]] + nn.fit(X) + + dist, ind = nn.kneighbors() + assert_array_equal(dist, [[1], [1]]) + assert_array_equal(ind, [[1], [0]]) + dist, ind = nn.radius_neighbors(None, radius=1.5) + check_object_arrays(dist, [[1], [1]]) + check_object_arrays(ind, [[1], [0]]) + + # Test the graph variants. + rng = nn.radius_neighbors_graph(None, radius=1.5) + kng = nn.kneighbors_graph(None) + for graph in [rng, kng]: + assert_array_equal(graph.toarray(), [[0, 1], [1, 0]]) + assert_array_equal(graph.data, [1, 1]) + assert_array_equal(graph.indices, [1, 0]) + + X = [[0, 1], [0, 1], [1, 1]] + nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm) + nn.fit(X) + assert_array_equal( + nn.kneighbors_graph().toarray(), + np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0]]), + ) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +def test_k_and_radius_neighbors_duplicates(algorithm): + # Test behavior of kneighbors when duplicates are present in query + nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm) + duplicates = [[0], [1], [3]] + + nn.fit(duplicates) + + # Do not do anything special to duplicates. + kng = nn.kneighbors_graph(duplicates, mode="distance") + assert_allclose( + kng.toarray(), np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) + ) + assert_allclose(kng.data, [0.0, 0.0, 0.0]) + assert_allclose(kng.indices, [0, 1, 2]) + + dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5) + check_object_arrays(dist, [[0, 1], [1, 0]]) + check_object_arrays(ind, [[0, 1], [0, 1]]) + + rng = nn.radius_neighbors_graph(duplicates, radius=1.5) + assert_allclose( + rng.toarray(), np.array([[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) + ) + + rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5, mode="distance") + rng.sort_indices() + assert_allclose(rng.toarray(), [[0, 1, 0], [1, 0, 0]]) + assert_allclose(rng.indices, [0, 1, 0, 1]) + assert_allclose(rng.data, [0, 1, 1, 0]) + + # Mask the first duplicates when n_duplicates > n_neighbors. + X = np.ones((3, 1)) + nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm="brute") + nn.fit(X) + dist, ind = nn.kneighbors() + assert_allclose(dist, np.zeros((3, 1))) + assert_allclose(ind, [[1], [0], [1]]) + + # Test that zeros are explicitly marked in kneighbors_graph. 
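+ # All three training points are identical, so every neighbor distance is
+ # exactly 0. The distance-mode graph must keep those zeros as explicitly
+ # stored CSR entries (checked through `data` and `indices` below) rather
+ # than dropping them, otherwise the neighbor structure would be lost.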
+ kng = nn.kneighbors_graph(mode="distance") + assert_allclose(kng.toarray(), np.zeros((3, 3))) + assert_allclose(kng.data, np.zeros(3)) + assert_allclose(kng.indices, [1, 0, 1]) + assert_allclose( + nn.kneighbors_graph().toarray(), + np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]), + ) + + +def test_include_self_neighbors_graph(): + # Test include_self parameter in neighbors_graph + X = [[2, 3], [4, 5]] + kng = neighbors.kneighbors_graph(X, 1, include_self=True).toarray() + kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).toarray() + assert_array_equal(kng, [[1.0, 0.0], [0.0, 1.0]]) + assert_array_equal(kng_not_self, [[0.0, 1.0], [1.0, 0.0]]) + + rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).toarray() + rng_not_self = neighbors.radius_neighbors_graph( + X, 5.0, include_self=False + ).toarray() + assert_array_equal(rng, [[1.0, 1.0], [1.0, 1.0]]) + assert_array_equal(rng_not_self, [[0.0, 1.0], [1.0, 0.0]]) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +def test_same_knn_parallel(algorithm): + X, y = datasets.make_classification( + n_samples=30, n_features=5, n_redundant=0, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y) + + clf = neighbors.KNeighborsClassifier(n_neighbors=3, algorithm=algorithm) + clf.fit(X_train, y_train) + y = clf.predict(X_test) + dist, ind = clf.kneighbors(X_test) + graph = clf.kneighbors_graph(X_test, mode="distance").toarray() + + clf.set_params(n_jobs=3) + clf.fit(X_train, y_train) + y_parallel = clf.predict(X_test) + dist_parallel, ind_parallel = clf.kneighbors(X_test) + graph_parallel = clf.kneighbors_graph(X_test, mode="distance").toarray() + + assert_array_equal(y, y_parallel) + assert_allclose(dist, dist_parallel) + assert_array_equal(ind, ind_parallel) + assert_allclose(graph, graph_parallel) + + +@pytest.mark.parametrize("algorithm", ALGORITHMS) +def test_same_radius_neighbors_parallel(algorithm): + X, y = datasets.make_classification( + n_samples=30, n_features=5, n_redundant=0, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y) + + clf = neighbors.RadiusNeighborsClassifier(radius=10, algorithm=algorithm) + clf.fit(X_train, y_train) + y = clf.predict(X_test) + dist, ind = clf.radius_neighbors(X_test) + graph = clf.radius_neighbors_graph(X_test, mode="distance").toarray() + + clf.set_params(n_jobs=3) + clf.fit(X_train, y_train) + y_parallel = clf.predict(X_test) + dist_parallel, ind_parallel = clf.radius_neighbors(X_test) + graph_parallel = clf.radius_neighbors_graph(X_test, mode="distance").toarray() + + assert_array_equal(y, y_parallel) + for i in range(len(dist)): + assert_allclose(dist[i], dist_parallel[i]) + assert_array_equal(ind[i], ind_parallel[i]) + assert_allclose(graph, graph_parallel) + + +@pytest.mark.parametrize("backend", ["threading", "loky"]) +@pytest.mark.parametrize("algorithm", ALGORITHMS) +def test_knn_forcing_backend(backend, algorithm): + # Non-regression test which ensures the knn methods are properly working + # even when forcing the global joblib backend. 
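+ # The classifier below is created with n_jobs=2, so fitting and querying
+ # actually dispatch work through the forced backend; the test only checks
+ # that this completes without raising.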
+ with joblib.parallel_backend(backend): + X, y = datasets.make_classification( + n_samples=30, n_features=5, n_redundant=0, random_state=0 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y) + + clf = neighbors.KNeighborsClassifier( + n_neighbors=3, algorithm=algorithm, n_jobs=2 + ) + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.kneighbors(X_test) + clf.kneighbors_graph(X_test, mode="distance") + + +def test_dtype_convert(): + classifier = neighbors.KNeighborsClassifier(n_neighbors=1) + CLASSES = 15 + X = np.eye(CLASSES) + y = [ch for ch in "ABCDEFGHIJKLMNOPQRSTU"[:CLASSES]] + + result = classifier.fit(X, y).predict(X) + assert_array_equal(result, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_metric_callable(csr_container): + def sparse_metric(x, y): # Metric accepting sparse matrix input (only) + assert issparse(x) and issparse(y) + return x.dot(y.T).toarray().item() + + X = csr_container( + [[1, 1, 1, 1, 1], [1, 0, 1, 0, 1], [0, 0, 1, 0, 0]] # Population matrix + ) + + Y = csr_container([[1, 1, 0, 1, 1], [1, 0, 0, 1, 1]]) # Query matrix + + nn = neighbors.NearestNeighbors( + algorithm="brute", n_neighbors=2, metric=sparse_metric + ).fit(X) + N = nn.kneighbors(Y, return_distance=False) + + # GS indices of nearest neighbours in `X` for `sparse_metric` + gold_standard_nn = np.array([[2, 1], [2, 1]]) + + assert_array_equal(N, gold_standard_nn) + + +# ignore conversion to boolean in pairwise_distances +@ignore_warnings(category=DataConversionWarning) +def test_pairwise_boolean_distance(): + # Non-regression test for #4523 + # 'brute': uses scipy.spatial.distance through pairwise_distances + # 'ball_tree': uses sklearn.neighbors._dist_metrics + rng = np.random.RandomState(0) + X = rng.uniform(size=(6, 5)) + NN = neighbors.NearestNeighbors + + nn1 = NN(metric="jaccard", algorithm="brute").fit(X) + nn2 = NN(metric="jaccard", algorithm="ball_tree").fit(X) + assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0]) + + +def test_radius_neighbors_predict_proba(): + for seed in range(5): + X, y = datasets.make_classification( + n_samples=50, + n_features=5, + n_informative=3, + n_redundant=0, + n_classes=3, + random_state=seed, + ) + X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0) + outlier_label = int(2 - seed) + clf = neighbors.RadiusNeighborsClassifier(radius=2, outlier_label=outlier_label) + clf.fit(X_tr, y_tr) + pred = clf.predict(X_te) + proba = clf.predict_proba(X_te) + proba_label = proba.argmax(axis=1) + proba_label = np.where(proba.sum(axis=1) == 0, outlier_label, proba_label) + assert_array_equal(pred, proba_label) + + +def test_pipeline_with_nearest_neighbors_transformer(): + # Test chaining KNeighborsTransformer and classifiers/regressors + rng = np.random.RandomState(0) + X = 2 * rng.rand(40, 5) - 1 + X2 = 2 * rng.rand(40, 5) - 1 + y = rng.rand(40, 1) + + n_neighbors = 12 + radius = 1.5 + # We precompute more neighbors than necessary, to have equivalence between + # k-neighbors estimator after radius-neighbors transformer, and vice-versa. 
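+ # With mode="distance" the transformer emits a sparse distance graph, and the
+ # downstream estimator fitted with metric="precomputed" only consumes the
+ # nearest entries of each row. As long as every row contains at least the
+ # neighbors that estimator needs, precomputing a superset (factor = 2 below)
+ # should give the same predictions as the compact estimator.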
+ factor = 2 + + k_trans = neighbors.KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance") + k_trans_factor = neighbors.KNeighborsTransformer( + n_neighbors=int(n_neighbors * factor), mode="distance" + ) + + r_trans = neighbors.RadiusNeighborsTransformer(radius=radius, mode="distance") + r_trans_factor = neighbors.RadiusNeighborsTransformer( + radius=int(radius * factor), mode="distance" + ) + + k_reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors) + r_reg = neighbors.RadiusNeighborsRegressor(radius=radius) + + test_list = [ + (k_trans, k_reg), + (k_trans_factor, r_reg), + (r_trans, r_reg), + (r_trans_factor, k_reg), + ] + + for trans, reg in test_list: + # compare the chained version and the compact version + reg_compact = clone(reg) + reg_precomp = clone(reg) + reg_precomp.set_params(metric="precomputed") + + reg_chain = make_pipeline(clone(trans), reg_precomp) + + y_pred_chain = reg_chain.fit(X, y).predict(X2) + y_pred_compact = reg_compact.fit(X, y).predict(X2) + assert_allclose(y_pred_chain, y_pred_compact) + + +@pytest.mark.parametrize( + "X, metric, metric_params, expected_algo", + [ + (np.random.randint(10, size=(10, 10)), "precomputed", None, "brute"), + (np.random.randn(10, 20), "euclidean", None, "brute"), + (np.random.randn(8, 5), "euclidean", None, "brute"), + (np.random.randn(10, 5), "euclidean", None, "kd_tree"), + (np.random.randn(10, 5), "seuclidean", {"V": [2] * 5}, "ball_tree"), + (np.random.randn(10, 5), "correlation", None, "brute"), + ], +) +def test_auto_algorithm(X, metric, metric_params, expected_algo): + model = neighbors.NearestNeighbors( + n_neighbors=4, algorithm="auto", metric=metric, metric_params=metric_params + ) + model.fit(X) + assert model._fit_method == expected_algo + + +@pytest.mark.parametrize( + "metric", sorted(set(neighbors.VALID_METRICS["brute"]) - set(["precomputed"])) +) +def test_radius_neighbors_brute_backend( + metric, + global_random_seed, + global_dtype, + n_samples=2000, + n_features=30, + n_query_pts=5, + radius=1.0, +): + rng = np.random.RandomState(global_random_seed) + # Both backends for the 'brute' algorithm of radius_neighbors + # must give identical results. 
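+ # The legacy backend routes the distance computations through
+ # sklearn.metrics.pairwise_distances, while the other backend uses the Cython
+ # pairwise-distances reduction implementation; the two are toggled below via
+ # config_context(enable_cython_pairwise_dist=...).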
+ X_train = rng.rand(n_samples, n_features).astype(global_dtype, copy=False) + X_test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False) + + # Haversine distance only accepts 2D data + if metric == "haversine": + feature_sl = slice(None, 2) + X_train = np.ascontiguousarray(X_train[:, feature_sl]) + X_test = np.ascontiguousarray(X_test[:, feature_sl]) + + metric_params_list = _generate_test_params_for(metric, n_features) + + for metric_params in metric_params_list: + p = metric_params.pop("p", 2) + + neigh = neighbors.NearestNeighbors( + radius=radius, + algorithm="brute", + metric=metric, + p=p, + metric_params=metric_params, + ) + + neigh.fit(X_train) + + with config_context(enable_cython_pairwise_dist=False): + # Use the legacy backend for brute + legacy_brute_dst, legacy_brute_idx = neigh.radius_neighbors( + X_test, return_distance=True + ) + with config_context(enable_cython_pairwise_dist=True): + # Use the pairwise-distances reduction backend for brute + pdr_brute_dst, pdr_brute_idx = neigh.radius_neighbors( + X_test, return_distance=True + ) + + assert_compatible_radius_results( + legacy_brute_dst, + pdr_brute_dst, + legacy_brute_idx, + pdr_brute_idx, + radius=radius, + check_sorted=False, + ) + + +def test_valid_metrics_has_no_duplicate(): + for val in neighbors.VALID_METRICS.values(): + assert len(val) == len(set(val)) + + +def test_regressor_predict_on_arraylikes(): + """Ensures that `predict` works for array-likes when `weights` is a callable. + + Non-regression test for #22687. + """ + X = [[5, 1], [3, 1], [4, 3], [0, 3]] + y = [2, 3, 5, 6] + + def _weights(dist): + return np.ones_like(dist) + + est = KNeighborsRegressor(n_neighbors=1, algorithm="brute", weights=_weights) + est.fit(X, y) + assert_allclose(est.predict([[0, 2.5]]), [6]) + + +def test_predict_dataframe(): + """Check that KNN predict works with dataframes + + non-regression test for issue #26768 + """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), columns=["a", "b"]) + y = np.array([1, 2, 3, 4]) + + knn = neighbors.KNeighborsClassifier(n_neighbors=2).fit(X, y) + knn.predict(X) + + +def test_nearest_neighbours_works_with_p_less_than_1(): + """Check that NearestNeighbors works with :math:`p \\in (0,1)` when `algorithm` + is `"auto"` or `"brute"` regardless of the dtype of X. + + Non-regression test for issue #26548 + """ + X = np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 1.0]]) + neigh = neighbors.NearestNeighbors( + n_neighbors=3, algorithm="brute", metric_params={"p": 0.5} + ) + neigh.fit(X) + + y = neigh.radius_neighbors(X[0].reshape(1, -1), radius=4, return_distance=False) + assert_allclose(y[0], [0, 1, 2]) + + y = neigh.kneighbors(X[0].reshape(1, -1), return_distance=False) + assert_allclose(y[0], [0, 1, 2]) + + +def test_KNeighborsClassifier_raise_on_all_zero_weights(): + """Check that `predict` and `predict_proba` raises on sample of all zeros weights. + + Related to Issue #25854. + """ + X = [[0, 1], [1, 2], [2, 3], [3, 4]] + y = [0, 0, 1, 1] + + def _weights(dist): + return np.vectorize(lambda x: 0 if x > 0.5 else 1)(dist) + + est = neighbors.KNeighborsClassifier(n_neighbors=3, weights=_weights) + est.fit(X, y) + + msg = ( + "All neighbors of some sample is getting zero weights. " + "Please modify 'weights' to avoid this case if you are " + "using a user-defined function." 
+ ) + + with pytest.raises(ValueError, match=msg): + est.predict([[1.1, 1.1]]) + + with pytest.raises(ValueError, match=msg): + est.predict_proba([[1.1, 1.1]]) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_pipeline.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..1d01a0d0a60a80d4cc84cfac28a179a002508dbf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_pipeline.py @@ -0,0 +1,256 @@ +""" +This is testing the equivalence between some estimators with internal nearest +neighbors computations, and the corresponding pipeline versions with +KNeighborsTransformer or RadiusNeighborsTransformer to precompute the +neighbors. +""" + +import numpy as np + +from sklearn.base import clone +from sklearn.cluster import DBSCAN, SpectralClustering +from sklearn.cluster.tests.common import generate_clustered_data +from sklearn.datasets import make_blobs +from sklearn.manifold import TSNE, Isomap, SpectralEmbedding +from sklearn.neighbors import ( + KNeighborsRegressor, + KNeighborsTransformer, + LocalOutlierFactor, + RadiusNeighborsRegressor, + RadiusNeighborsTransformer, +) +from sklearn.pipeline import make_pipeline +from sklearn.utils._testing import assert_array_almost_equal + + +def test_spectral_clustering(): + # Test chaining KNeighborsTransformer and SpectralClustering + n_neighbors = 5 + X, _ = make_blobs(random_state=0) + + # compare the chained version and the compact version + est_chain = make_pipeline( + KNeighborsTransformer(n_neighbors=n_neighbors, mode="connectivity"), + SpectralClustering( + n_neighbors=n_neighbors, affinity="precomputed", random_state=42 + ), + ) + est_compact = SpectralClustering( + n_neighbors=n_neighbors, affinity="nearest_neighbors", random_state=42 + ) + labels_compact = est_compact.fit_predict(X) + labels_chain = est_chain.fit_predict(X) + assert_array_almost_equal(labels_chain, labels_compact) + + +def test_spectral_embedding(): + # Test chaining KNeighborsTransformer and SpectralEmbedding + n_neighbors = 5 + + n_samples = 1000 + centers = np.array( + [ + [0.0, 5.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 4.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 5.0, 1.0], + ] + ) + S, true_labels = make_blobs( + n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42 + ) + + # compare the chained version and the compact version + est_chain = make_pipeline( + KNeighborsTransformer(n_neighbors=n_neighbors, mode="connectivity"), + SpectralEmbedding( + n_neighbors=n_neighbors, affinity="precomputed", random_state=42 + ), + ) + est_compact = SpectralEmbedding( + n_neighbors=n_neighbors, affinity="nearest_neighbors", random_state=42 + ) + St_compact = est_compact.fit_transform(S) + St_chain = est_chain.fit_transform(S) + assert_array_almost_equal(St_chain, St_compact) + + +def test_dbscan(): + # Test chaining RadiusNeighborsTransformer and DBSCAN + radius = 0.3 + n_clusters = 3 + X = generate_clustered_data(n_clusters=n_clusters) + + # compare the chained version and the compact version + est_chain = make_pipeline( + RadiusNeighborsTransformer(radius=radius, mode="distance"), + DBSCAN(metric="precomputed", eps=radius), + ) + est_compact = DBSCAN(eps=radius) + + labels_chain = est_chain.fit_predict(X) + labels_compact = est_compact.fit_predict(X) + assert_array_almost_equal(labels_chain, labels_compact) + + +def test_isomap(): + # Test chaining KNeighborsTransformer and Isomap with + 
# neighbors_algorithm='precomputed' + algorithm = "auto" + n_neighbors = 10 + + X, _ = make_blobs(random_state=0) + X2, _ = make_blobs(random_state=1) + + # compare the chained version and the compact version + est_chain = make_pipeline( + KNeighborsTransformer( + n_neighbors=n_neighbors, algorithm=algorithm, mode="distance" + ), + Isomap(n_neighbors=n_neighbors, metric="precomputed"), + ) + est_compact = Isomap(n_neighbors=n_neighbors, neighbors_algorithm=algorithm) + + Xt_chain = est_chain.fit_transform(X) + Xt_compact = est_compact.fit_transform(X) + assert_array_almost_equal(Xt_chain, Xt_compact) + + Xt_chain = est_chain.transform(X2) + Xt_compact = est_compact.transform(X2) + assert_array_almost_equal(Xt_chain, Xt_compact) + + +def test_tsne(): + # Test chaining KNeighborsTransformer and TSNE + n_iter = 250 + perplexity = 5 + n_neighbors = int(3.0 * perplexity + 1) + + rng = np.random.RandomState(0) + X = rng.randn(20, 2) + + for metric in ["minkowski", "sqeuclidean"]: + # compare the chained version and the compact version + est_chain = make_pipeline( + KNeighborsTransformer( + n_neighbors=n_neighbors, mode="distance", metric=metric + ), + TSNE( + init="random", + metric="precomputed", + perplexity=perplexity, + method="barnes_hut", + random_state=42, + n_iter=n_iter, + ), + ) + est_compact = TSNE( + init="random", + metric=metric, + perplexity=perplexity, + n_iter=n_iter, + method="barnes_hut", + random_state=42, + ) + + Xt_chain = est_chain.fit_transform(X) + Xt_compact = est_compact.fit_transform(X) + assert_array_almost_equal(Xt_chain, Xt_compact) + + +def test_lof_novelty_false(): + # Test chaining KNeighborsTransformer and LocalOutlierFactor + n_neighbors = 4 + + rng = np.random.RandomState(0) + X = rng.randn(40, 2) + + # compare the chained version and the compact version + est_chain = make_pipeline( + KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance"), + LocalOutlierFactor( + metric="precomputed", + n_neighbors=n_neighbors, + novelty=False, + contamination="auto", + ), + ) + est_compact = LocalOutlierFactor( + n_neighbors=n_neighbors, novelty=False, contamination="auto" + ) + + pred_chain = est_chain.fit_predict(X) + pred_compact = est_compact.fit_predict(X) + assert_array_almost_equal(pred_chain, pred_compact) + + +def test_lof_novelty_true(): + # Test chaining KNeighborsTransformer and LocalOutlierFactor + n_neighbors = 4 + + rng = np.random.RandomState(0) + X1 = rng.randn(40, 2) + X2 = rng.randn(40, 2) + + # compare the chained version and the compact version + est_chain = make_pipeline( + KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance"), + LocalOutlierFactor( + metric="precomputed", + n_neighbors=n_neighbors, + novelty=True, + contamination="auto", + ), + ) + est_compact = LocalOutlierFactor( + n_neighbors=n_neighbors, novelty=True, contamination="auto" + ) + + pred_chain = est_chain.fit(X1).predict(X2) + pred_compact = est_compact.fit(X1).predict(X2) + assert_array_almost_equal(pred_chain, pred_compact) + + +def test_kneighbors_regressor(): + # Test chaining KNeighborsTransformer and classifiers/regressors + rng = np.random.RandomState(0) + X = 2 * rng.rand(40, 5) - 1 + X2 = 2 * rng.rand(40, 5) - 1 + y = rng.rand(40, 1) + + n_neighbors = 12 + radius = 1.5 + # We precompute more neighbors than necessary, to have equivalence between + # k-neighbors estimator after radius-neighbors transformer, and vice-versa. 
+ factor = 2 + + k_trans = KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance") + k_trans_factor = KNeighborsTransformer( + n_neighbors=int(n_neighbors * factor), mode="distance" + ) + + r_trans = RadiusNeighborsTransformer(radius=radius, mode="distance") + r_trans_factor = RadiusNeighborsTransformer( + radius=int(radius * factor), mode="distance" + ) + + k_reg = KNeighborsRegressor(n_neighbors=n_neighbors) + r_reg = RadiusNeighborsRegressor(radius=radius) + + test_list = [ + (k_trans, k_reg), + (k_trans_factor, r_reg), + (r_trans, r_reg), + (r_trans_factor, k_reg), + ] + + for trans, reg in test_list: + # compare the chained version and the compact version + reg_compact = clone(reg) + reg_precomp = clone(reg) + reg_precomp.set_params(metric="precomputed") + + reg_chain = make_pipeline(clone(trans), reg_precomp) + + y_pred_chain = reg_chain.fit(X, y).predict(X2) + y_pred_compact = reg_compact.fit(X, y).predict(X2) + assert_array_almost_equal(y_pred_chain, y_pred_compact) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_tree.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..4d8bac12f7423caadf1a392b5d42313f6a3f32f8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_neighbors_tree.py @@ -0,0 +1,296 @@ +# License: BSD 3 clause + +import itertools +import pickle + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal + +from sklearn.metrics import DistanceMetric +from sklearn.neighbors._ball_tree import ( + BallTree, + kernel_norm, +) +from sklearn.neighbors._ball_tree import ( + NeighborsHeap64 as NeighborsHeapBT, +) +from sklearn.neighbors._ball_tree import ( + nodeheap_sort as nodeheap_sort_bt, +) +from sklearn.neighbors._ball_tree import ( + simultaneous_sort as simultaneous_sort_bt, +) +from sklearn.neighbors._kd_tree import ( + KDTree, +) +from sklearn.neighbors._kd_tree import ( + NeighborsHeap64 as NeighborsHeapKDT, +) +from sklearn.neighbors._kd_tree import ( + nodeheap_sort as nodeheap_sort_kdt, +) +from sklearn.neighbors._kd_tree import ( + simultaneous_sort as simultaneous_sort_kdt, +) +from sklearn.utils import check_random_state + +rng = np.random.RandomState(42) +V_mahalanobis = rng.rand(3, 3) +V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T) + +DIMENSION = 3 + +METRICS = { + "euclidean": {}, + "manhattan": {}, + "minkowski": dict(p=3), + "chebyshev": {}, + "seuclidean": dict(V=rng.random_sample(DIMENSION)), + "mahalanobis": dict(V=V_mahalanobis), +} + +KD_TREE_METRICS = ["euclidean", "manhattan", "chebyshev", "minkowski"] +BALL_TREE_METRICS = list(METRICS) + + +def dist_func(x1, x2, p): + return np.sum((x1 - x2) ** p) ** (1.0 / p) + + +def compute_kernel_slow(Y, X, kernel, h): + d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) + norm = kernel_norm(h, X.shape[1], kernel) + + if kernel == "gaussian": + return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1) + elif kernel == "tophat": + return norm * (d < h).sum(-1) + elif kernel == "epanechnikov": + return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1) + elif kernel == "exponential": + return norm * (np.exp(-d / h)).sum(-1) + elif kernel == "linear": + return norm * ((1 - d / h) * (d < h)).sum(-1) + elif kernel == "cosine": + return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1) + else: + raise ValueError("kernel not recognized") + + +def 
brute_force_neighbors(X, Y, k, metric, **kwargs): + D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X) + ind = np.argsort(D, axis=1)[:, :k] + dist = D[np.arange(Y.shape[0])[:, None], ind] + return dist, ind + + +@pytest.mark.parametrize("Cls", [KDTree, BallTree]) +@pytest.mark.parametrize( + "kernel", ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"] +) +@pytest.mark.parametrize("h", [0.01, 0.1, 1]) +@pytest.mark.parametrize("rtol", [0, 1e-5]) +@pytest.mark.parametrize("atol", [1e-6, 1e-2]) +@pytest.mark.parametrize("breadth_first", [True, False]) +def test_kernel_density( + Cls, kernel, h, rtol, atol, breadth_first, n_samples=100, n_features=3 +): + rng = check_random_state(1) + X = rng.random_sample((n_samples, n_features)) + Y = rng.random_sample((n_samples, n_features)) + dens_true = compute_kernel_slow(Y, X, kernel, h) + + tree = Cls(X, leaf_size=10) + dens = tree.kernel_density( + Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first + ) + assert_allclose(dens, dens_true, atol=atol, rtol=max(rtol, 1e-7)) + + +@pytest.mark.parametrize("Cls", [KDTree, BallTree]) +def test_neighbor_tree_query_radius(Cls, n_samples=100, n_features=10): + rng = check_random_state(0) + X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1 + query_pt = np.zeros(n_features, dtype=float) + + eps = 1e-15 # roundoff error can cause test to fail + tree = Cls(X, leaf_size=5) + rad = np.sqrt(((X - query_pt) ** 2).sum(1)) + + for r in np.linspace(rad[0], rad[-1], 100): + ind = tree.query_radius([query_pt], r + eps)[0] + i = np.where(rad <= r + eps)[0] + + ind.sort() + i.sort() + + assert_array_almost_equal(i, ind) + + +@pytest.mark.parametrize("Cls", [KDTree, BallTree]) +def test_neighbor_tree_query_radius_distance(Cls, n_samples=100, n_features=10): + rng = check_random_state(0) + X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1 + query_pt = np.zeros(n_features, dtype=float) + + eps = 1e-15 # roundoff error can cause test to fail + tree = Cls(X, leaf_size=5) + rad = np.sqrt(((X - query_pt) ** 2).sum(1)) + + for r in np.linspace(rad[0], rad[-1], 100): + ind, dist = tree.query_radius([query_pt], r + eps, return_distance=True) + + ind = ind[0] + dist = dist[0] + + d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1)) + + assert_array_almost_equal(d, dist) + + +@pytest.mark.parametrize("Cls", [KDTree, BallTree]) +@pytest.mark.parametrize("dualtree", (True, False)) +def test_neighbor_tree_two_point(Cls, dualtree, n_samples=100, n_features=3): + rng = check_random_state(0) + X = rng.random_sample((n_samples, n_features)) + Y = rng.random_sample((n_samples, n_features)) + r = np.linspace(0, 1, 10) + tree = Cls(X, leaf_size=10) + + D = DistanceMetric.get_metric("euclidean").pairwise(Y, X) + counts_true = [(D <= ri).sum() for ri in r] + + counts = tree.two_point_correlation(Y, r=r, dualtree=dualtree) + assert_array_almost_equal(counts, counts_true) + + +@pytest.mark.parametrize("NeighborsHeap", [NeighborsHeapBT, NeighborsHeapKDT]) +def test_neighbors_heap(NeighborsHeap, n_pts=5, n_nbrs=10): + heap = NeighborsHeap(n_pts, n_nbrs) + rng = check_random_state(0) + + for row in range(n_pts): + d_in = rng.random_sample(2 * n_nbrs).astype(np.float64, copy=False) + i_in = np.arange(2 * n_nbrs, dtype=np.intp) + for d, i in zip(d_in, i_in): + heap.push(row, d, i) + + ind = np.argsort(d_in) + d_in = d_in[ind] + i_in = i_in[ind] + + d_heap, i_heap = heap.get_arrays(sort=True) + + assert_array_almost_equal(d_in[:n_nbrs], d_heap[row]) + 
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row]) + + +@pytest.mark.parametrize("nodeheap_sort", [nodeheap_sort_bt, nodeheap_sort_kdt]) +def test_node_heap(nodeheap_sort, n_nodes=50): + rng = check_random_state(0) + vals = rng.random_sample(n_nodes).astype(np.float64, copy=False) + + i1 = np.argsort(vals) + vals2, i2 = nodeheap_sort(vals) + + assert_array_almost_equal(i1, i2) + assert_array_almost_equal(vals[i1], vals2) + + +@pytest.mark.parametrize( + "simultaneous_sort", [simultaneous_sort_bt, simultaneous_sort_kdt] +) +def test_simultaneous_sort(simultaneous_sort, n_rows=10, n_pts=201): + rng = check_random_state(0) + dist = rng.random_sample((n_rows, n_pts)).astype(np.float64, copy=False) + ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(np.intp, copy=False) + + dist2 = dist.copy() + ind2 = ind.copy() + + # simultaneous sort rows using function + simultaneous_sort(dist, ind) + + # simultaneous sort rows using numpy + i = np.argsort(dist2, axis=1) + row_ind = np.arange(n_rows)[:, None] + dist2 = dist2[row_ind, i] + ind2 = ind2[row_ind, i] + + assert_array_almost_equal(dist, dist2) + assert_array_almost_equal(ind, ind2) + + +@pytest.mark.parametrize("Cls", [KDTree, BallTree]) +def test_gaussian_kde(Cls, n_samples=1000): + # Compare gaussian KDE results to scipy.stats.gaussian_kde + from scipy.stats import gaussian_kde + + rng = check_random_state(0) + x_in = rng.normal(0, 1, n_samples) + x_out = np.linspace(-5, 5, 30) + + for h in [0.01, 0.1, 1]: + tree = Cls(x_in[:, None]) + gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in)) + + dens_tree = tree.kernel_density(x_out[:, None], h) / n_samples + dens_gkde = gkde.evaluate(x_out) + + assert_array_almost_equal(dens_tree, dens_gkde, decimal=3) + + +@pytest.mark.parametrize( + "Cls, metric", + itertools.chain( + [(KDTree, metric) for metric in KD_TREE_METRICS], + [(BallTree, metric) for metric in BALL_TREE_METRICS], + ), +) +@pytest.mark.parametrize("k", (1, 3, 5)) +@pytest.mark.parametrize("dualtree", (True, False)) +@pytest.mark.parametrize("breadth_first", (True, False)) +def test_nn_tree_query(Cls, metric, k, dualtree, breadth_first): + rng = check_random_state(0) + X = rng.random_sample((40, DIMENSION)) + Y = rng.random_sample((10, DIMENSION)) + + kwargs = METRICS[metric] + + kdt = Cls(X, leaf_size=1, metric=metric, **kwargs) + dist1, ind1 = kdt.query(Y, k, dualtree=dualtree, breadth_first=breadth_first) + dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs) + + # don't check indices here: if there are any duplicate distances, + # the indices may not match. Distances should not have this problem. 
+ assert_array_almost_equal(dist1, dist2) + + +@pytest.mark.parametrize( + "Cls, metric", + [(KDTree, "euclidean"), (BallTree, "euclidean"), (BallTree, dist_func)], +) +@pytest.mark.parametrize("protocol", (0, 1, 2)) +def test_pickle(Cls, metric, protocol): + rng = check_random_state(0) + X = rng.random_sample((10, 3)) + + if hasattr(metric, "__call__"): + kwargs = {"p": 2} + else: + kwargs = {} + + tree1 = Cls(X, leaf_size=1, metric=metric, **kwargs) + + ind1, dist1 = tree1.query(X) + + s = pickle.dumps(tree1, protocol=protocol) + tree2 = pickle.loads(s) + + ind2, dist2 = tree2.query(X) + + assert_array_almost_equal(ind1, ind2) + assert_array_almost_equal(dist1, dist2) + + assert isinstance(tree2, Cls) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_quad_tree.py b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_quad_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..be9a4c5fe549d32a130f9c6a55f6675fa0e42f20 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/test_quad_tree.py @@ -0,0 +1,144 @@ +import pickle + +import numpy as np +import pytest + +from sklearn.neighbors._quad_tree import _QuadTree +from sklearn.utils import check_random_state + + +def test_quadtree_boundary_computation(): + # Introduce a point into a quad tree with boundaries not easy to compute. + Xs = [] + + # check a random case + Xs.append(np.array([[-1, 1], [-4, -1]], dtype=np.float32)) + # check the case where only 0 are inserted + Xs.append(np.array([[0, 0], [0, 0]], dtype=np.float32)) + # check the case where only negative are inserted + Xs.append(np.array([[-1, -2], [-4, 0]], dtype=np.float32)) + # check the case where only small numbers are inserted + Xs.append(np.array([[-1e-6, 1e-6], [-4e-6, -1e-6]], dtype=np.float32)) + + for X in Xs: + tree = _QuadTree(n_dimensions=2, verbose=0) + tree.build_tree(X) + tree._check_coherence() + + +def test_quadtree_similar_point(): + # Introduce a point into a quad tree where a similar point already exists. + # Test will hang if it doesn't complete. 
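+ # Inserting a point recursively splits cells until every point lies in its
+ # own cell. Two nearly identical points (down to float32 resolution) can
+ # force extremely deep splits, so the tree must detect such near-duplicates
+ # instead of recursing indefinitely.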
+ Xs = [] + + # check the case where points are actually different + Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32)) + # check the case where points are the same on X axis + Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32)) + # check the case where points are arbitrarily close on X axis + Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32)) + # check the case where points are the same on Y axis + Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32)) + # check the case where points are arbitrarily close on Y axis + Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32)) + # check the case where points are arbitrarily close on both axes + Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]], dtype=np.float32)) + + # check the case where points are arbitrarily close on both axes + # close to machine epsilon - x axis + Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]], dtype=np.float32)) + + # check the case where points are arbitrarily close on both axes + # close to machine epsilon - y axis + Xs.append( + np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]], dtype=np.float32) + ) + + for X in Xs: + tree = _QuadTree(n_dimensions=2, verbose=0) + tree.build_tree(X) + tree._check_coherence() + + +@pytest.mark.parametrize("n_dimensions", (2, 3)) +@pytest.mark.parametrize("protocol", (0, 1, 2)) +def test_quad_tree_pickle(n_dimensions, protocol): + rng = check_random_state(0) + + X = rng.random_sample((10, n_dimensions)) + + tree = _QuadTree(n_dimensions=n_dimensions, verbose=0) + tree.build_tree(X) + + s = pickle.dumps(tree, protocol=protocol) + bt2 = pickle.loads(s) + + for x in X: + cell_x_tree = tree.get_cell(x) + cell_x_bt2 = bt2.get_cell(x) + assert cell_x_tree == cell_x_bt2 + + +@pytest.mark.parametrize("n_dimensions", (2, 3)) +def test_qt_insert_duplicate(n_dimensions): + rng = check_random_state(0) + + X = rng.random_sample((10, n_dimensions)) + Xd = np.r_[X, X[:5]] + tree = _QuadTree(n_dimensions=n_dimensions, verbose=0) + tree.build_tree(Xd) + + cumulative_size = tree.cumulative_size + leafs = tree.leafs + + # Assert that the first 5 are indeed duplicated and that the next + # ones are single point leaf + for i, x in enumerate(X): + cell_id = tree.get_cell(x) + assert leafs[cell_id] + assert cumulative_size[cell_id] == 1 + (i < 5) + + +def test_summarize(): + # Simple check for quad tree's summarize + + angle = 0.9 + X = np.array( + [[-10.0, -10.0], [9.0, 10.0], [10.0, 9.0], [10.0, 10.0]], dtype=np.float32 + ) + query_pt = X[0, :] + n_dimensions = X.shape[1] + offset = n_dimensions + 2 + + qt = _QuadTree(n_dimensions, verbose=0) + qt.build_tree(X) + + idx, summary = qt._py_summarize(query_pt, X, angle) + + node_dist = summary[n_dimensions] + node_size = summary[n_dimensions + 1] + + # Summary should contain only 1 node with size 3 and distance to + # X[1:] barycenter + barycenter = X[1:].mean(axis=0) + ds2c = ((X[0] - barycenter) ** 2).sum() + + assert idx == offset + assert node_size == 3, "summary size = {}".format(node_size) + assert np.isclose(node_dist, ds2c) + + # Summary should contain all 3 node with size 1 and distance to + # each point in X[1:] for ``angle=0`` + idx, summary = qt._py_summarize(query_pt, X, 0.0) + barycenter = X[1:].mean(axis=0) + ds2c = ((X[0] - barycenter) ** 2).sum() + + assert idx == 3 * (offset) + for i in range(3): + node_dist = summary[i * offset + n_dimensions] + node_size = summary[i * offset + n_dimensions + 1] + + ds2c = ((X[0] - X[i + 1]) ** 2).sum() + + 
assert node_size == 1, "summary size = {}".format(node_size) + assert np.isclose(node_dist, ds2c) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fcc37b5518e0fce0ae3d32b6fee3760dda8e91e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_discretization.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6fd61c72f30cb421c55fc51c2aa96e2f0bc4eb1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_label.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28472b1c05e671987b552ac06da51b16e10e2d47 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__pycache__/_polynomial.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c0222d5e9936920522987bb0b5bd8261f2d4336 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5087c14f0a957f38bd02b187e58bb3411bfd8f63 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_data.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a41cb912e3fe651141dcc6f729fda762e8bb3ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_function_transformer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d3118aee6adf65c8bd0d22ba4499d1ae567ef49 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__pycache__/test_target_encoder.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..09f702f64ce2367ef6fe47fdb789e0475bf11def --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_common.py @@ -0,0 +1,187 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.base import clone +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import ( + MaxAbsScaler, + MinMaxScaler, + PowerTransformer, + QuantileTransformer, + RobustScaler, + StandardScaler, + maxabs_scale, + minmax_scale, + power_transform, + quantile_transform, + robust_scale, + scale, +) +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import ( + BSR_CONTAINERS, + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DIA_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +iris = load_iris() + + +def _get_valid_samples_by_column(X, col): + """Get non NaN samples in column of X""" + return X[:, [col]][~np.isnan(X[:, col])] + + +@pytest.mark.parametrize( + "est, func, support_sparse, strictly_positive, omit_kwargs", + [ + (MaxAbsScaler(), maxabs_scale, True, False, []), + (MinMaxScaler(), minmax_scale, False, False, ["clip"]), + (StandardScaler(), scale, False, False, []), + (StandardScaler(with_mean=False), scale, True, False, []), + (PowerTransformer("yeo-johnson"), power_transform, False, False, []), + (PowerTransformer("box-cox"), power_transform, False, True, []), + (QuantileTransformer(n_quantiles=10), quantile_transform, True, False, []), + (RobustScaler(), robust_scale, False, False, []), + (RobustScaler(with_centering=False), robust_scale, True, False, []), + ], +) +def test_missing_value_handling( + est, func, support_sparse, strictly_positive, omit_kwargs +): + # check that the preprocessing method let pass nan + rng = np.random.RandomState(42) + X = iris.data.copy() + n_missing = 50 + X[ + rng.randint(X.shape[0], size=n_missing), rng.randint(X.shape[1], size=n_missing) + ] = np.nan + if strictly_positive: + X += np.nanmin(X) + 0.1 + X_train, X_test = train_test_split(X, random_state=1) + # sanity check + assert not np.all(np.isnan(X_train), axis=0).any() + assert np.any(np.isnan(X_train), axis=0).all() + assert np.any(np.isnan(X_test), axis=0).all() + X_test[:, 0] = np.nan # make sure this boundary case is tested + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt = est.fit(X_train).transform(X_test) + # ensure no warnings are raised + # missing values should still be missing, and only them + assert_array_equal(np.isnan(Xt), np.isnan(X_test)) + + # check that the function leads to the same results as the class + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_class = est.transform(X_train) + kwargs = est.get_params() + # remove the parameters which should be omitted because they + # are not defined in the counterpart function of the preprocessing class + for kwarg in omit_kwargs: + _ = kwargs.pop(kwarg) + Xt_func = func(X_train, **kwargs) + assert_array_equal(np.isnan(Xt_func), np.isnan(Xt_class)) + assert_allclose(Xt_func[~np.isnan(Xt_func)], Xt_class[~np.isnan(Xt_class)]) + + # check that the inverse transform keep NaN + Xt_inv = est.inverse_transform(Xt) + assert_array_equal(np.isnan(Xt_inv), np.isnan(X_test)) + # 
FIXME: we can introduce equal_nan=True in recent version of numpy. + # For the moment which just check that non-NaN values are almost equal. + assert_allclose(Xt_inv[~np.isnan(Xt_inv)], X_test[~np.isnan(X_test)]) + + for i in range(X.shape[1]): + # train only on non-NaN + est.fit(_get_valid_samples_by_column(X_train, i)) + # check transforming with NaN works even when training without NaN + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_col = est.transform(X_test[:, [i]]) + assert_allclose(Xt_col, Xt[:, [i]]) + # check non-NaN is handled as before - the 1st column is all nan + if not np.isnan(X_test[:, i]).all(): + Xt_col_nonan = est.transform(_get_valid_samples_by_column(X_test, i)) + assert_array_equal(Xt_col_nonan, Xt_col[~np.isnan(Xt_col.squeeze())]) + + if support_sparse: + est_dense = clone(est) + est_sparse = clone(est) + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + Xt_dense = est_dense.fit(X_train).transform(X_test) + Xt_inv_dense = est_dense.inverse_transform(Xt_dense) + + for sparse_container in ( + BSR_CONTAINERS + + COO_CONTAINERS + + CSC_CONTAINERS + + CSR_CONTAINERS + + DIA_CONTAINERS + + DOK_CONTAINERS + + LIL_CONTAINERS + ): + # check that the dense and sparse inputs lead to the same results + # precompute the matrix to avoid catching side warnings + X_train_sp = sparse_container(X_train) + X_test_sp = sparse_container(X_test) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", PendingDeprecationWarning) + warnings.simplefilter("error", RuntimeWarning) + Xt_sp = est_sparse.fit(X_train_sp).transform(X_test_sp) + + assert_allclose(Xt_sp.toarray(), Xt_dense) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", PendingDeprecationWarning) + warnings.simplefilter("error", RuntimeWarning) + Xt_inv_sp = est_sparse.inverse_transform(Xt_sp) + + assert_allclose(Xt_inv_sp.toarray(), Xt_inv_dense) + + +@pytest.mark.parametrize( + "est, func", + [ + (MaxAbsScaler(), maxabs_scale), + (MinMaxScaler(), minmax_scale), + (StandardScaler(), scale), + (StandardScaler(with_mean=False), scale), + (PowerTransformer("yeo-johnson"), power_transform), + ( + PowerTransformer("box-cox"), + power_transform, + ), + (QuantileTransformer(n_quantiles=3), quantile_transform), + (RobustScaler(), robust_scale), + (RobustScaler(with_centering=False), robust_scale), + ], +) +def test_missing_value_pandas_na_support(est, func): + # Test pandas IntegerArray with pd.NA + pd = pytest.importorskip("pandas") + + X = np.array( + [ + [1, 2, 3, np.nan, np.nan, 4, 5, 1], + [np.nan, np.nan, 8, 4, 6, np.nan, np.nan, 8], + [1, 2, 3, 4, 5, 6, 7, 8], + ] + ).T + + # Creates dataframe with IntegerArrays with pd.NA + X_df = pd.DataFrame(X, dtype="Int16", columns=["a", "b", "c"]) + X_df["c"] = X_df["c"].astype("int") + + X_trans = est.fit_transform(X) + X_df_trans = est.fit_transform(X_df) + + assert_allclose(X_trans, X_df_trans) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py new file mode 100644 index 0000000000000000000000000000000000000000..ee5e1152fc710e5791e446ca8ffe0bc87beb001b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_encoders.py @@ -0,0 +1,2338 @@ +import re + +import numpy as np +import pytest +from scipy import sparse + +from sklearn.exceptions import NotFittedError +from sklearn.preprocessing import OneHotEncoder, 
OrdinalEncoder +from sklearn.utils import is_scalar_nan +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_one_hot_encoder_sparse_dense(): + # check that sparse and dense will give the same results + + X = np.array([[3, 2, 1], [0, 1, 1]]) + enc_sparse = OneHotEncoder() + enc_dense = OneHotEncoder(sparse_output=False) + + X_trans_sparse = enc_sparse.fit_transform(X) + X_trans_dense = enc_dense.fit_transform(X) + + assert X_trans_sparse.shape == (2, 5) + assert X_trans_dense.shape == (2, 5) + + assert sparse.issparse(X_trans_sparse) + assert not sparse.issparse(X_trans_dense) + + # check outcome + assert_array_equal( + X_trans_sparse.toarray(), [[0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0, 1.0]] + ) + assert_array_equal(X_trans_sparse.toarray(), X_trans_dense) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_one_hot_encoder_handle_unknown(handle_unknown): + X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) + X2 = np.array([[4, 1, 1]]) + + # Test that one hot encoder raises error for unknown features + # present during transform. + oh = OneHotEncoder(handle_unknown="error") + oh.fit(X) + with pytest.raises(ValueError, match="Found unknown categories"): + oh.transform(X2) + + # Test the ignore option, ignores unknown features (giving all 0's) + oh = OneHotEncoder(handle_unknown=handle_unknown) + oh.fit(X) + X2_passed = X2.copy() + assert_array_equal( + oh.transform(X2_passed).toarray(), + np.array([[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]), + ) + # ensure transformed data was not modified in place + assert_allclose(X2, X2_passed) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_one_hot_encoder_handle_unknown_strings(handle_unknown): + X = np.array(["11111111", "22", "333", "4444"]).reshape((-1, 1)) + X2 = np.array(["55555", "22"]).reshape((-1, 1)) + # Non Regression test for the issue #12470 + # Test the ignore option, when categories are numpy string dtype + # particularly when the known category strings are larger + # than the unknown category strings + oh = OneHotEncoder(handle_unknown=handle_unknown) + oh.fit(X) + X2_passed = X2.copy() + assert_array_equal( + oh.transform(X2_passed).toarray(), + np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]), + ) + # ensure transformed data was not modified in place + assert_array_equal(X2, X2_passed) + + +@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) +@pytest.mark.parametrize("input_dtype", [np.int32, np.float32, np.float64]) +def test_one_hot_encoder_dtype(input_dtype, output_dtype): + X = np.asarray([[0, 1]], dtype=input_dtype).T + X_expected = np.asarray([[1, 0], [0, 1]], dtype=output_dtype) + + oh = OneHotEncoder(categories="auto", dtype=output_dtype) + assert_array_equal(oh.fit_transform(X).toarray(), X_expected) + assert_array_equal(oh.fit(X).transform(X).toarray(), X_expected) + + oh = OneHotEncoder(categories="auto", dtype=output_dtype, sparse_output=False) + assert_array_equal(oh.fit_transform(X), X_expected) + assert_array_equal(oh.fit(X).transform(X), X_expected) + + +@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64]) +def test_one_hot_encoder_dtype_pandas(output_dtype): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + X_expected = np.array([[1, 0, 1, 0], [0, 1, 0, 1]], dtype=output_dtype) + + oh = OneHotEncoder(dtype=output_dtype) + 
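+    # Editor's sketch (illustrative addition, not part of the upstream test):
+    # the `dtype` passed to OneHotEncoder controls the dtype of the encoded
+    # output, independently of the mixed column dtypes in `X_df`.
+    assert oh.fit_transform(X_df).dtype == output_dtype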
assert_array_equal(oh.fit_transform(X_df).toarray(), X_expected) + assert_array_equal(oh.fit(X_df).transform(X_df).toarray(), X_expected) + + oh = OneHotEncoder(dtype=output_dtype, sparse_output=False) + assert_array_equal(oh.fit_transform(X_df), X_expected) + assert_array_equal(oh.fit(X_df).transform(X_df), X_expected) + + +def test_one_hot_encoder_feature_names(): + enc = OneHotEncoder() + X = [ + ["Male", 1, "girl", 2, 3], + ["Female", 41, "girl", 1, 10], + ["Male", 51, "boy", 12, 3], + ["Male", 91, "girl", 21, 30], + ] + + enc.fit(X) + feature_names = enc.get_feature_names_out() + + assert_array_equal( + [ + "x0_Female", + "x0_Male", + "x1_1", + "x1_41", + "x1_51", + "x1_91", + "x2_boy", + "x2_girl", + "x3_1", + "x3_2", + "x3_12", + "x3_21", + "x4_3", + "x4_10", + "x4_30", + ], + feature_names, + ) + + feature_names2 = enc.get_feature_names_out(["one", "two", "three", "four", "five"]) + + assert_array_equal( + [ + "one_Female", + "one_Male", + "two_1", + "two_41", + "two_51", + "two_91", + "three_boy", + "three_girl", + "four_1", + "four_2", + "four_12", + "four_21", + "five_3", + "five_10", + "five_30", + ], + feature_names2, + ) + + with pytest.raises(ValueError, match="input_features should have length"): + enc.get_feature_names_out(["one", "two"]) + + +def test_one_hot_encoder_feature_names_unicode(): + enc = OneHotEncoder() + X = np.array([["c❤t1", "dat2"]], dtype=object).T + enc.fit(X) + feature_names = enc.get_feature_names_out() + assert_array_equal(["x0_c❤t1", "x0_dat2"], feature_names) + feature_names = enc.get_feature_names_out(input_features=["n👍me"]) + assert_array_equal(["n👍me_c❤t1", "n👍me_dat2"], feature_names) + + +def test_one_hot_encoder_custom_feature_name_combiner(): + """Check the behaviour of `feature_name_combiner` as a callable.""" + + def name_combiner(feature, category): + return feature + "_" + repr(category) + + enc = OneHotEncoder(feature_name_combiner=name_combiner) + X = np.array([["None", None]], dtype=object).T + enc.fit(X) + feature_names = enc.get_feature_names_out() + assert_array_equal(["x0_'None'", "x0_None"], feature_names) + feature_names = enc.get_feature_names_out(input_features=["a"]) + assert_array_equal(["a_'None'", "a_None"], feature_names) + + def wrong_combiner(feature, category): + # we should be returning a Python string + return 0 + + enc = OneHotEncoder(feature_name_combiner=wrong_combiner).fit(X) + err_msg = ( + "When `feature_name_combiner` is a callable, it should return a Python string." 
+ ) + with pytest.raises(TypeError, match=err_msg): + enc.get_feature_names_out() + + +def test_one_hot_encoder_set_params(): + X = np.array([[1, 2]]).T + oh = OneHotEncoder() + # set params on not yet fitted object + oh.set_params(categories=[[0, 1, 2, 3]]) + assert oh.get_params()["categories"] == [[0, 1, 2, 3]] + assert oh.fit_transform(X).toarray().shape == (2, 4) + # set params on already fitted object + oh.set_params(categories=[[0, 1, 2, 3, 4]]) + assert oh.fit_transform(X).toarray().shape == (2, 5) + + +def check_categorical_onehot(X): + enc = OneHotEncoder(categories="auto") + Xtr1 = enc.fit_transform(X) + + enc = OneHotEncoder(categories="auto", sparse_output=False) + Xtr2 = enc.fit_transform(X) + + assert_allclose(Xtr1.toarray(), Xtr2) + + assert sparse.issparse(Xtr1) and Xtr1.format == "csr" + return Xtr1.toarray() + + +@pytest.mark.parametrize( + "X", + [ + [["def", 1, 55], ["abc", 2, 55]], + np.array([[10, 1, 55], [5, 2, 55]]), + np.array([["b", "A", "cat"], ["a", "B", "cat"]], dtype=object), + np.array([["b", 1, "cat"], ["a", np.nan, "cat"]], dtype=object), + np.array([["b", 1, "cat"], ["a", float("nan"), "cat"]], dtype=object), + np.array([[None, 1, "cat"], ["a", 2, "cat"]], dtype=object), + np.array([[None, 1, None], ["a", np.nan, None]], dtype=object), + np.array([[None, 1, None], ["a", float("nan"), None]], dtype=object), + ], + ids=[ + "mixed", + "numeric", + "object", + "mixed-nan", + "mixed-float-nan", + "mixed-None", + "mixed-None-nan", + "mixed-None-float-nan", + ], +) +def test_one_hot_encoder(X): + Xtr = check_categorical_onehot(np.array(X)[:, [0]]) + assert_allclose(Xtr, [[0, 1], [1, 0]]) + + Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]]) + assert_allclose(Xtr, [[0, 1, 1, 0], [1, 0, 0, 1]]) + + Xtr = OneHotEncoder(categories="auto").fit_transform(X) + assert_allclose(Xtr.toarray(), [[0, 1, 1, 0, 1], [1, 0, 0, 1, 1]]) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +@pytest.mark.parametrize("sparse_", [False, True]) +@pytest.mark.parametrize("drop", [None, "first"]) +def test_one_hot_encoder_inverse(handle_unknown, sparse_, drop): + X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] + enc = OneHotEncoder(sparse_output=sparse_, drop=drop) + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + X = [[2, 55], [1, 55], [3, 55]] + enc = OneHotEncoder(sparse_output=sparse_, categories="auto", drop=drop) + X_tr = enc.fit_transform(X) + exp = np.array(X) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + if drop is None: + # with unknown categories + # drop is incompatible with handle_unknown=ignore + X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]] + enc = OneHotEncoder( + sparse_output=sparse_, + handle_unknown=handle_unknown, + categories=[["abc", "def"], [1, 2], [54, 55, 56]], + ) + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + exp[2, 1] = None + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # with an otherwise numerical output, still object if unknown + X = [[2, 55], [1, 55], [3, 55]] + enc = OneHotEncoder( + sparse_output=sparse_, + categories=[[1, 2], [54, 56]], + handle_unknown=handle_unknown, + ) + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + exp[2, 0] = None + exp[:, 1] = None + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # incorrect shape raises + X_tr = np.array([[0, 1, 1], [1, 0, 1]]) + msg = re.escape("Shape of the passed X data is not correct") + with 
pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_tr) + + +@pytest.mark.parametrize("sparse_", [False, True]) +@pytest.mark.parametrize( + "X, X_trans", + [ + ([[2, 55], [1, 55], [2, 55]], [[0, 1, 1], [0, 0, 0], [0, 1, 1]]), + ( + [["one", "a"], ["two", "a"], ["three", "b"], ["two", "a"]], + [[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0]], + ), + ], +) +def test_one_hot_encoder_inverse_transform_raise_error_with_unknown( + X, X_trans, sparse_ +): + """Check that `inverse_transform` raise an error with unknown samples, no + dropped feature, and `handle_unknow="error`. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/14934 + """ + enc = OneHotEncoder(sparse_output=sparse_).fit(X) + msg = ( + r"Samples \[(\d )*\d\] can not be inverted when drop=None and " + r"handle_unknown='error' because they contain all zeros" + ) + + if sparse_: + # emulate sparse data transform by a one-hot encoder sparse. + X_trans = _convert_container(X_trans, "sparse") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_trans) + + +def test_one_hot_encoder_inverse_if_binary(): + X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + X_tr = ohe.fit_transform(X) + assert_array_equal(ohe.inverse_transform(X_tr), X) + + +@pytest.mark.parametrize("drop", ["if_binary", "first", None]) +@pytest.mark.parametrize("reset_drop", ["if_binary", "first", None]) +def test_one_hot_encoder_drop_reset(drop, reset_drop): + # check that resetting drop option without refitting does not throw an error + X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object) + ohe = OneHotEncoder(drop=drop, sparse_output=False) + ohe.fit(X) + X_tr = ohe.transform(X) + feature_names = ohe.get_feature_names_out() + ohe.set_params(drop=reset_drop) + assert_array_equal(ohe.inverse_transform(X_tr), X) + assert_allclose(ohe.transform(X), X_tr) + assert_array_equal(ohe.get_feature_names_out(), feature_names) + + +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +@pytest.mark.parametrize("X", [[1, 2], np.array([3.0, 4.0])]) +def test_X_is_not_1D(X, method): + oh = OneHotEncoder() + + msg = "Expected 2D array, got 1D array instead" + with pytest.raises(ValueError, match=msg): + getattr(oh, method)(X) + + +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +def test_X_is_not_1D_pandas(method): + pd = pytest.importorskip("pandas") + X = pd.Series([6, 3, 4, 6]) + oh = OneHotEncoder() + + msg = f"Expected a 2-dimensional container but got {type(X)} instead." 
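+    # Editor's sketch (illustrative addition, not part of the upstream test):
+    # a pandas Series is 1-D, so the encoder rejects it below; converting it
+    # to a single-column DataFrame with `to_frame()` is one way to obtain a
+    # valid 2-D input. `X_2d` is a name introduced only for this sketch.
+    X_2d = X.to_frame()
+    assert OneHotEncoder().fit_transform(X_2d).shape == (4, 3)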
+ with pytest.raises(ValueError, match=msg): + getattr(oh, method)(X) + + +@pytest.mark.parametrize( + "X, cat_exp, cat_dtype", + [ + ([["abc", 55], ["def", 55]], [["abc", "def"], [55]], np.object_), + (np.array([[1, 2], [3, 2]]), [[1, 3], [2]], np.integer), + ( + np.array([["A", "cat"], ["B", "cat"]], dtype=object), + [["A", "B"], ["cat"]], + np.object_, + ), + (np.array([["A", "cat"], ["B", "cat"]]), [["A", "B"], ["cat"]], np.str_), + (np.array([[1, 2], [np.nan, 2]]), [[1, np.nan], [2]], np.float64), + ( + np.array([["A", np.nan], [None, np.nan]], dtype=object), + [["A", None], [np.nan]], + np.object_, + ), + ( + np.array([["A", float("nan")], [None, float("nan")]], dtype=object), + [["A", None], [float("nan")]], + np.object_, + ), + ], + ids=[ + "mixed", + "numeric", + "object", + "string", + "missing-float", + "missing-np.nan-object", + "missing-float-nan-object", + ], +) +def test_one_hot_encoder_categories(X, cat_exp, cat_dtype): + # order of categories should not depend on order of samples + for Xi in [X, X[::-1]]: + enc = OneHotEncoder(categories="auto") + enc.fit(Xi) + # assert enc.categories == 'auto' + assert isinstance(enc.categories_, list) + for res, exp in zip(enc.categories_, cat_exp): + res_list = res.tolist() + if is_scalar_nan(exp[-1]): + assert is_scalar_nan(res_list[-1]) + assert res_list[:-1] == exp[:-1] + else: + assert res.tolist() == exp + assert np.issubdtype(res.dtype, cat_dtype) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [["a", "b", "c"]], + np.object_, + ), + ( + np.array([[1, 2]], dtype="int64").T, + np.array([[1, 4]], dtype="int64").T, + [[1, 2, 3]], + np.int64, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [np.array(["a", "b", "c"])], + np.object_, + ), + ( + np.array([[None, "a"]], dtype=object).T, + np.array([[None, "b"]], dtype=object).T, + [[None, "a", "z"]], + object, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", np.nan]], dtype=object).T, + [["a", "b", "z"]], + object, + ), + ( + np.array([["a", None]], dtype=object).T, + np.array([["a", np.nan]], dtype=object).T, + [["a", None, "z"]], + object, + ), + ], + ids=[ + "object", + "numeric", + "object-string", + "object-string-none", + "object-string-nan", + "object-None-and-nan", + ], +) +def test_one_hot_encoder_specified_categories(X, X2, cats, cat_dtype, handle_unknown): + enc = OneHotEncoder(categories=cats) + exp = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert list(enc.categories[0]) == list(cats[0]) + assert enc.categories_[0].tolist() == list(cats[0]) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert enc.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories should already + # raise when fitting + enc = OneHotEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + enc.fit(X2) + enc = OneHotEncoder(categories=cats, handle_unknown=handle_unknown) + exp = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) + assert_array_equal(enc.fit(X2).transform(X2).toarray(), exp) + + +def test_one_hot_encoder_unsorted_categories(): + X = np.array([["a", "b"]], dtype=object).T + + enc = OneHotEncoder(categories=[["b", "a", "c"]]) + exp = np.array([[0.0, 1.0, 0.0], 
[1.0, 0.0, 0.0]]) + assert_array_equal(enc.fit(X).transform(X).toarray(), exp) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert enc.categories_[0].tolist() == ["b", "a", "c"] + assert np.issubdtype(enc.categories_[0].dtype, np.object_) + + # unsorted passed categories still raise for numerical values + X = np.array([[1, 2]]).T + enc = OneHotEncoder(categories=[[2, 1, 3]]) + msg = "Unsorted categories are not supported" + with pytest.raises(ValueError, match=msg): + enc.fit_transform(X) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_nan_ending_specified_categories(Encoder): + """Test encoder for specified categories that nan is at the end. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27088 + """ + cats = [np.array([0, np.nan, 1])] + enc = Encoder(categories=cats) + X = np.array([[0, 1]], dtype=object).T + with pytest.raises(ValueError, match="Nan should be the last element"): + enc.fit(X) + + +def test_one_hot_encoder_specified_categories_mixed_columns(): + # multiple columns + X = np.array([["a", "b"], [0, 2]], dtype=object).T + enc = OneHotEncoder(categories=[["a", "b", "c"], [0, 1, 2]]) + exp = np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]]) + assert_array_equal(enc.fit_transform(X).toarray(), exp) + assert enc.categories_[0].tolist() == ["a", "b", "c"] + assert np.issubdtype(enc.categories_[0].dtype, np.object_) + assert enc.categories_[1].tolist() == [0, 1, 2] + # integer categories but from object dtype data + assert np.issubdtype(enc.categories_[1].dtype, np.object_) + + +def test_one_hot_encoder_pandas(): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + + Xtr = check_categorical_onehot(X_df) + assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]]) + + +@pytest.mark.parametrize( + "drop, expected_names", + [ + ("first", ["x0_c", "x2_b"]), + ("if_binary", ["x0_c", "x1_2", "x2_b"]), + (["c", 2, "b"], ["x0_b", "x2_a"]), + ], + ids=["first", "binary", "manual"], +) +def test_one_hot_encoder_feature_names_drop(drop, expected_names): + X = [["c", 2, "a"], ["b", 2, "b"]] + + ohe = OneHotEncoder(drop=drop) + ohe.fit(X) + feature_names = ohe.get_feature_names_out() + assert_array_equal(expected_names, feature_names) + + +def test_one_hot_encoder_drop_equals_if_binary(): + # Canonical case + X = [[10, "yes"], [20, "no"], [30, "yes"]] + expected = np.array( + [[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]] + ) + expected_drop_idx = np.array([None, 0]) + + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + result = ohe.fit_transform(X) + assert_array_equal(ohe.drop_idx_, expected_drop_idx) + assert_allclose(result, expected) + + # with only one cat, the behaviour is equivalent to drop=None + X = [["true", "a"], ["false", "a"], ["false", "a"]] + expected = np.array([[1.0, 1.0], [0.0, 1.0], [0.0, 1.0]]) + expected_drop_idx = np.array([0, None]) + + ohe = OneHotEncoder(drop="if_binary", sparse_output=False) + result = ohe.fit_transform(X) + assert_array_equal(ohe.drop_idx_, expected_drop_idx) + assert_allclose(result, expected) + + +@pytest.mark.parametrize( + "X", + [ + [["abc", 2, 55], ["def", 1, 55]], + np.array([[10, 2, 55], [20, 1, 55]]), + np.array([["a", "B", "cat"], ["b", "A", "cat"]], dtype=object), + ], + ids=["mixed", "numeric", "object"], +) +def test_ordinal_encoder(X): + enc = OrdinalEncoder() + exp = np.array([[0, 1, 0], [1, 0, 0]], dtype="int64") + 
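+    # Editor's sketch (illustrative addition, not part of the upstream test):
+    # OrdinalEncoder outputs float64 by default, which is why the integer
+    # expectation below is cast with `exp.astype("float64")`.
+    assert OrdinalEncoder().fit_transform(X).dtype == np.float64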
assert_array_equal(enc.fit_transform(X), exp.astype("float64")) + enc = OrdinalEncoder(dtype="int64") + assert_array_equal(enc.fit_transform(X), exp) + + +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [["a", "b", "c"]], + np.object_, + ), + ( + np.array([[1, 2]], dtype="int64").T, + np.array([[1, 4]], dtype="int64").T, + [[1, 2, 3]], + np.int64, + ), + ( + np.array([["a", "b"]], dtype=object).T, + np.array([["a", "d"]], dtype=object).T, + [np.array(["a", "b", "c"])], + np.object_, + ), + ], + ids=["object", "numeric", "object-string-cat"], +) +def test_ordinal_encoder_specified_categories(X, X2, cats, cat_dtype): + enc = OrdinalEncoder(categories=cats) + exp = np.array([[0.0], [1.0]]) + assert_array_equal(enc.fit_transform(X), exp) + assert list(enc.categories[0]) == list(cats[0]) + assert enc.categories_[0].tolist() == list(cats[0]) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert enc.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories should already + # raise when fitting + enc = OrdinalEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + enc.fit(X2) + + +def test_ordinal_encoder_inverse(): + X = [["abc", 2, 55], ["def", 1, 55]] + enc = OrdinalEncoder() + X_tr = enc.fit_transform(X) + exp = np.array(X, dtype=object) + assert_array_equal(enc.inverse_transform(X_tr), exp) + + # incorrect shape raises + X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]]) + msg = re.escape("Shape of the passed X data is not correct") + with pytest.raises(ValueError, match=msg): + enc.inverse_transform(X_tr) + + +def test_ordinal_encoder_handle_unknowns_string(): + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-2) + X_fit = np.array([["a", "x"], ["b", "y"], ["c", "z"]], dtype=object) + X_trans = np.array([["c", "xy"], ["bla", "y"], ["a", "x"]], dtype=object) + enc.fit(X_fit) + + X_trans_enc = enc.transform(X_trans) + exp = np.array([[2, -2], [-2, 1], [0, 0]], dtype="int64") + assert_array_equal(X_trans_enc, exp) + + X_trans_inv = enc.inverse_transform(X_trans_enc) + inv_exp = np.array([["c", None], [None, "y"], ["a", "x"]], dtype=object) + assert_array_equal(X_trans_inv, inv_exp) + + +@pytest.mark.parametrize("dtype", [float, int]) +def test_ordinal_encoder_handle_unknowns_numeric(dtype): + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-999) + X_fit = np.array([[1, 7], [2, 8], [3, 9]], dtype=dtype) + X_trans = np.array([[3, 12], [23, 8], [1, 7]], dtype=dtype) + enc.fit(X_fit) + + X_trans_enc = enc.transform(X_trans) + exp = np.array([[2, -999], [-999, 1], [0, 0]], dtype="int64") + assert_array_equal(X_trans_enc, exp) + + X_trans_inv = enc.inverse_transform(X_trans_enc) + inv_exp = np.array([[3, None], [None, 8], [1, 7]], dtype=object) + assert_array_equal(X_trans_inv, inv_exp) + + +def test_ordinal_encoder_handle_unknowns_nan(): + # Make sure unknown_value=np.nan properly works + + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan) + + X_fit = np.array([[1], [2], [3]]) + enc.fit(X_fit) + X_trans = enc.transform([[1], [2], [4]]) + assert_array_equal(X_trans, [[0], [1], [np.nan]]) + + +def test_ordinal_encoder_handle_unknowns_nan_non_float_dtype(): + # Make sure an error is raised when unknown_value=np.nan and the dtype + # isn't a float dtype + enc = OrdinalEncoder( + 
handle_unknown="use_encoded_value", unknown_value=np.nan, dtype=int + ) + + X_fit = np.array([[1], [2], [3]]) + with pytest.raises(ValueError, match="dtype parameter should be a float dtype"): + enc.fit(X_fit) + + +def test_ordinal_encoder_raise_categories_shape(): + X = np.array([["Low", "Medium", "High", "Medium", "Low"]], dtype=object).T + cats = ["Low", "Medium", "High"] + enc = OrdinalEncoder(categories=cats) + msg = "Shape mismatch: if categories is an array," + + with pytest.raises(ValueError, match=msg): + enc.fit(X) + + +def test_encoder_dtypes(): + # check that dtypes are preserved when determining categories + enc = OneHotEncoder(categories="auto") + exp = np.array([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]], dtype="float64") + + for X in [ + np.array([[1, 2], [3, 4]], dtype="int64"), + np.array([[1, 2], [3, 4]], dtype="float64"), + np.array([["a", "b"], ["c", "d"]]), # str dtype + np.array([[b"a", b"b"], [b"c", b"d"]]), # bytes dtype + np.array([[1, "a"], [3, "b"]], dtype="object"), + ]: + enc.fit(X) + assert all([enc.categories_[i].dtype == X.dtype for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = [[1, 2], [3, 4]] + enc.fit(X) + assert all([np.issubdtype(enc.categories_[i].dtype, np.integer) for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = [[1, "a"], [3, "b"]] + enc.fit(X) + assert all([enc.categories_[i].dtype == "object" for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + +def test_encoder_dtypes_pandas(): + # check dtype (similar to test_categorical_encoder_dtypes for dataframes) + pd = pytest.importorskip("pandas") + + enc = OneHotEncoder(categories="auto") + exp = np.array( + [[1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]], + dtype="float64", + ) + + X = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}, dtype="int64") + enc.fit(X) + assert all([enc.categories_[i].dtype == "int64" for i in range(2)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + X = pd.DataFrame({"A": [1, 2], "B": ["a", "b"], "C": [3.0, 4.0]}) + X_type = [X["A"].dtype, X["B"].dtype, X["C"].dtype] + enc.fit(X) + assert all([enc.categories_[i].dtype == X_type[i] for i in range(3)]) + assert_array_equal(enc.transform(X).toarray(), exp) + + +def test_one_hot_encoder_warning(): + enc = OneHotEncoder() + X = [["Male", 1], ["Female", 3]] + np.testing.assert_no_warnings(enc.fit_transform, X) + + +@pytest.mark.parametrize("missing_value", [np.nan, None, float("nan")]) +def test_one_hot_encoder_drop_manual(missing_value): + cats_to_drop = ["def", 12, 3, 56, missing_value] + enc = OneHotEncoder(drop=cats_to_drop) + X = [ + ["abc", 12, 2, 55, "a"], + ["def", 12, 1, 55, "a"], + ["def", 12, 3, 56, missing_value], + ] + trans = enc.fit_transform(X).toarray() + exp = [[1, 0, 1, 1, 1], [0, 1, 0, 1, 1], [0, 0, 0, 0, 0]] + assert_array_equal(trans, exp) + assert enc.drop is cats_to_drop + + dropped_cats = [ + cat[feature] for cat, feature in zip(enc.categories_, enc.drop_idx_) + ] + X_inv_trans = enc.inverse_transform(trans) + X_array = np.array(X, dtype=object) + + # last value is np.nan + if is_scalar_nan(cats_to_drop[-1]): + assert_array_equal(dropped_cats[:-1], cats_to_drop[:-1]) + assert is_scalar_nan(dropped_cats[-1]) + assert is_scalar_nan(cats_to_drop[-1]) + # do not include the last column which includes missing values + assert_array_equal(X_array[:, :-1], X_inv_trans[:, :-1]) + + # check last column is the missing value + assert_array_equal(X_array[-1, :-1], X_inv_trans[-1, :-1]) + assert 
is_scalar_nan(X_array[-1, -1]) + assert is_scalar_nan(X_inv_trans[-1, -1]) + else: + assert_array_equal(dropped_cats, cats_to_drop) + assert_array_equal(X_array, X_inv_trans) + + +@pytest.mark.parametrize("drop", [["abc", 3], ["abc", 3, 41, "a"]]) +def test_invalid_drop_length(drop): + enc = OneHotEncoder(drop=drop) + err_msg = "`drop` should have length equal to the number" + with pytest.raises(ValueError, match=err_msg): + enc.fit([["abc", 2, 55], ["def", 1, 55], ["def", 3, 59]]) + + +@pytest.mark.parametrize("density", [True, False], ids=["sparse", "dense"]) +@pytest.mark.parametrize("drop", ["first", ["a", 2, "b"]], ids=["first", "manual"]) +def test_categories(density, drop): + ohe_base = OneHotEncoder(sparse_output=density) + ohe_test = OneHotEncoder(sparse_output=density, drop=drop) + X = [["c", 1, "a"], ["a", 2, "b"]] + ohe_base.fit(X) + ohe_test.fit(X) + assert_array_equal(ohe_base.categories_, ohe_test.categories_) + if drop == "first": + assert_array_equal(ohe_test.drop_idx_, 0) + else: + for drop_cat, drop_idx, cat_list in zip( + drop, ohe_test.drop_idx_, ohe_test.categories_ + ): + assert cat_list[int(drop_idx)] == drop_cat + assert isinstance(ohe_test.drop_idx_, np.ndarray) + assert ohe_test.drop_idx_.dtype == object + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoders_has_categorical_tags(Encoder): + assert "categorical" in Encoder()._get_tags()["X_types"] + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 2}, + {"min_frequency": 11}, + {"min_frequency": 0.29}, + {"max_categories": 2, "min_frequency": 6}, + {"max_categories": 4, "min_frequency": 12}, + ], +) +@pytest.mark.parametrize("categories", ["auto", [["a", "b", "c", "d"]]]) +def test_ohe_infrequent_two_levels(kwargs, categories): + """Test that different parameters for combine 'a', 'c', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + categories=categories, + handle_unknown="infrequent_if_exist", + sparse_output=False, + **kwargs, + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "c", "d"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_b", "x0_infrequent_sklearn"], feature_names) + + +@pytest.mark.parametrize("drop", ["if_binary", "first", ["b"]]) +def test_ohe_infrequent_two_levels_drop_frequent(drop): + """Test two levels and dropping the frequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=2, + drop=drop, + ).fit(X_train) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "b" + + X_test = np.array([["b"], ["c"]]) + X_trans = ohe.transform(X_test) + assert_allclose([[0], [1]], X_trans) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_infrequent_sklearn"], feature_names) + + X_inverse = ohe.inverse_transform(X_trans) + assert_array_equal([["b"], ["infrequent_sklearn"]], X_inverse) + + +@pytest.mark.parametrize("drop", [["a"], ["d"]]) +def 
test_ohe_infrequent_two_levels_drop_infrequent_errors(drop): + """Test two levels and dropping any infrequent category removes the + whole infrequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=2, + drop=drop, + ) + + msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" + with pytest.raises(ValueError, match=msg): + ohe.fit(X_train) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 3}, + {"min_frequency": 6}, + {"min_frequency": 9}, + {"min_frequency": 0.24}, + {"min_frequency": 0.16}, + {"max_categories": 3, "min_frequency": 8}, + {"max_categories": 4, "min_frequency": 6}, + ], +) +def test_ohe_infrequent_three_levels(kwargs): + """Test that different parameters for combing 'a', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + expected_inv = [ + ["b"], + ["infrequent_sklearn"], + ["c"], + ["infrequent_sklearn"], + ["infrequent_sklearn"], + ] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + feature_names = ohe.get_feature_names_out() + assert_array_equal(["x0_b", "x0_c", "x0_infrequent_sklearn"], feature_names) + + +@pytest.mark.parametrize("drop", ["first", ["b"]]) +def test_ohe_infrequent_three_levels_drop_frequent(drop): + """Test three levels and dropping the frequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=3, + drop=drop, + ).fit(X_train) + + X_test = np.array([["b"], ["c"], ["d"]]) + assert_allclose([[0, 0], [1, 0], [0, 1]], ohe.transform(X_test)) + + # Check handle_unknown="ignore" + ohe.set_params(handle_unknown="ignore").fit(X_train) + msg = "Found unknown categories" + with pytest.warns(UserWarning, match=msg): + X_trans = ohe.transform([["b"], ["e"]]) + + assert_allclose([[0, 0], [0, 0]], X_trans) + + +@pytest.mark.parametrize("drop", [["a"], ["d"]]) +def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop): + """Test three levels and dropping the infrequent category.""" + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", + sparse_output=False, + max_categories=3, + drop=drop, + ) + + msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" + with pytest.raises(ValueError, match=msg): + ohe.fit(X_train) + + +def test_ohe_infrequent_handle_unknown_error(): + """Test that different parameters for combining 'a', and 'd' into + the infrequent category works as expected.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ohe = OneHotEncoder( + handle_unknown="error", sparse_output=False, max_categories=3 + ).fit(X_train) + assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) + + # all categories are known + X_test = [["b"], ["a"], ["c"], ["d"]] + expected = np.array([[1, 0, 0], [0, 0, 1], 
[0, 1, 0], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'bad' is not known and will error + X_test = [["bad"]] + msg = r"Found unknown categories \['bad'\] in column 0" + with pytest.raises(ValueError, match=msg): + ohe.transform(X_test) + + +@pytest.mark.parametrize( + "kwargs", [{"max_categories": 3, "min_frequency": 1}, {"min_frequency": 4}] +) +def test_ohe_infrequent_two_levels_user_cats_one_frequent(kwargs): + """'a' is the only frequent category, all other categories are infrequent.""" + + X_train = np.array([["a"] * 5 + ["e"] * 30], dtype=object).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + **kwargs, + ).fit(X_train) + + X_test = [["a"], ["b"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'a' is dropped + drops = ["first", "if_binary", ["a"]] + X_test = [["a"], ["c"]] + for drop in drops: + ohe.set_params(drop=drop).fit(X_train) + assert_allclose([[0], [1]], ohe.transform(X_test)) + + +def test_ohe_infrequent_two_levels_user_cats(): + """Test that the order of the categories provided by a user is respected.""" + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + max_categories=2, + ).fit(X_train) + + assert_array_equal(ohe.infrequent_categories_, [["c", "d", "a"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'infrequent' is used to denote the infrequent categories for + # `inverse_transform` + expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_three_levels_user_cats(): + """Test that the order of the categories provided by a user is respected. 
+ In this case 'c' is encoded as the first category and 'b' is encoded + as the second one.""" + + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ohe = OneHotEncoder( + categories=[["c", "d", "b", "a"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + max_categories=3, + ).fit(X_train) + + assert_array_equal(ohe.infrequent_categories_, [["d", "a"]]) + + X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] + expected = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 0, 1]]) + + X_trans = ohe.transform(X_test) + assert_allclose(expected, X_trans) + + # 'infrequent' is used to denote the infrequent categories for + # `inverse_transform` + expected_inv = [ + ["b"], + ["infrequent_sklearn"], + ["c"], + ["infrequent_sklearn"], + ["infrequent_sklearn"], + ] + X_inv = ohe.inverse_transform(X_trans) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_mixed(): + """Test infrequent categories where feature 0 has infrequent categories, + and feature 1 does not.""" + + # X[:, 0] 1 and 2 are infrequent + # X[:, 1] nothing is infrequent + X = np.c_[[0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1]] + + ohe = OneHotEncoder(max_categories=3, drop="if_binary", sparse_output=False) + ohe.fit(X) + + X_test = [[3, 0], [1, 1]] + X_trans = ohe.transform(X_test) + + # feature 1 is binary so it drops a category 0 + assert_allclose(X_trans, [[0, 1, 0, 0], [0, 0, 1, 1]]) + + +def test_ohe_infrequent_multiple_categories(): + """Test infrequent categories with feature matrix with 3 features.""" + + X = np.c_[ + [0, 1, 3, 3, 3, 3, 2, 0, 3], + [0, 0, 5, 1, 1, 10, 5, 5, 0], + [1, 0, 1, 0, 1, 0, 1, 0, 1], + ] + + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" + ) + # X[:, 0] 1 and 2 are infrequent + # X[:, 1] 1 and 10 are infrequent + # X[:, 2] nothing is infrequent + + X_trans = ohe.fit_transform(X).toarray() + assert_array_equal(ohe.infrequent_categories_[0], [1, 2]) + assert_array_equal(ohe.infrequent_categories_[1], [1, 10]) + assert_array_equal(ohe.infrequent_categories_[2], None) + + # 'infrequent' is used to denote the infrequent categories + # For the first column, 1 and 2 have the same frequency. 
In this case, + # 1 will be chosen to be the feature name because is smaller lexiconically + feature_names = ohe.get_feature_names_out() + assert_array_equal( + [ + "x0_0", + "x0_3", + "x0_infrequent_sklearn", + "x1_0", + "x1_5", + "x1_infrequent_sklearn", + "x2_0", + "x2_1", + ], + feature_names, + ) + + expected = [ + [1, 0, 0, 1, 0, 0, 0, 1], + [0, 0, 1, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 1, 0, 0, 1], + [0, 1, 0, 0, 0, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0, 1], + [0, 1, 0, 0, 0, 1, 1, 0], + [0, 0, 1, 0, 1, 0, 0, 1], + [1, 0, 0, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 0, 0, 1], + ] + + assert_allclose(expected, X_trans) + + X_test = [[3, 1, 2], [4, 0, 3]] + + X_test_trans = ohe.transform(X_test) + + # X[:, 2] does not have an infrequent category, thus it is encoded as all + # zeros + expected = [[0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0]] + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [[3, "infrequent_sklearn", None], ["infrequent_sklearn", 0, None]], dtype=object + ) + assert_array_equal(expected_inv, X_inv) + + # error for unknown categories + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="error" + ).fit(X) + with pytest.raises(ValueError, match="Found unknown categories"): + ohe.transform(X_test) + + # only infrequent or known categories + X_test = [[1, 1, 1], [3, 10, 0]] + X_test_trans = ohe.transform(X_test) + + expected = [[0, 0, 1, 0, 0, 1, 0, 1], [0, 1, 0, 0, 0, 1, 1, 0]] + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + + expected_inv = np.array( + [["infrequent_sklearn", "infrequent_sklearn", 1], [3, "infrequent_sklearn", 0]], + dtype=object, + ) + assert_array_equal(expected_inv, X_inv) + + +def test_ohe_infrequent_multiple_categories_dtypes(): + """Test infrequent categories with a pandas dataframe with multiple dtypes.""" + + pd = pytest.importorskip("pandas") + X = pd.DataFrame( + { + "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], + "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], + }, + columns=["str", "int"], + ) + + ohe = OneHotEncoder( + categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" + ) + # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be + # considered infrequent because they are greater + + # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. 
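+    # (Editor's note, illustrative: `max_categories=3` keeps the two most
+    # frequent values of each feature and groups the remaining ones into a
+    # single "infrequent_sklearn" output column, which counts toward the
+    # limit.)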
+ # 0, 3, 12 will be considered infrequent + + X_trans = ohe.fit_transform(X).toarray() + assert_array_equal(ohe.infrequent_categories_[0], ["a", "b"]) + assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12]) + + expected = [ + [0, 0, 1, 1, 0, 0], + [0, 1, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 1], + [0, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1], + [1, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 0, 1], + [0, 0, 1, 1, 0, 0], + ] + + assert_allclose(expected, X_trans) + + X_test = pd.DataFrame({"str": ["b", "f"], "int": [14, 12]}, columns=["str", "int"]) + + expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]] + X_test_trans = ohe.transform(X_test) + assert_allclose(expected, X_test_trans.toarray()) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [["infrequent_sklearn", "infrequent_sklearn"], ["f", "infrequent_sklearn"]], + dtype=object, + ) + assert_array_equal(expected_inv, X_inv) + + # only infrequent or known categories + X_test = pd.DataFrame({"str": ["c", "b"], "int": [12, 5]}, columns=["str", "int"]) + X_test_trans = ohe.transform(X_test).toarray() + expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]] + assert_allclose(expected, X_test_trans) + + X_inv = ohe.inverse_transform(X_test_trans) + expected_inv = np.array( + [["c", "infrequent_sklearn"], ["infrequent_sklearn", 5]], dtype=object + ) + assert_array_equal(expected_inv, X_inv) + + +@pytest.mark.parametrize("kwargs", [{"min_frequency": 21, "max_categories": 1}]) +def test_ohe_infrequent_one_level_errors(kwargs): + """All user provided categories are infrequent.""" + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 2]).T + + ohe = OneHotEncoder( + handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs + ) + ohe.fit(X_train) + + X_trans = ohe.transform([["a"]]) + assert_allclose(X_trans, [[1]]) + + +@pytest.mark.parametrize("kwargs", [{"min_frequency": 2, "max_categories": 3}]) +def test_ohe_infrequent_user_cats_unknown_training_errors(kwargs): + """All user provided categories are infrequent.""" + + X_train = np.array([["e"] * 3], dtype=object).T + ohe = OneHotEncoder( + categories=[["c", "d", "a", "b"]], + sparse_output=False, + handle_unknown="infrequent_if_exist", + **kwargs, + ).fit(X_train) + + X_trans = ohe.transform([["a"], ["e"]]) + assert_allclose(X_trans, [[1], [1]]) + + +# deliberately omit 'OS' as an invalid combo +@pytest.mark.parametrize( + "input_dtype, category_dtype", ["OO", "OU", "UO", "UU", "SO", "SU", "SS"] +) +@pytest.mark.parametrize("array_type", ["list", "array", "dataframe"]) +def test_encoders_string_categories(input_dtype, category_dtype, array_type): + """Check that encoding work with object, unicode, and byte string dtypes. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/15616 + https://github.com/scikit-learn/scikit-learn/issues/15726 + https://github.com/scikit-learn/scikit-learn/issues/19677 + """ + + X = np.array([["b"], ["a"]], dtype=input_dtype) + categories = [np.array(["b", "a"], dtype=category_dtype)] + ohe = OneHotEncoder(categories=categories, sparse_output=False).fit(X) + + X_test = _convert_container( + [["a"], ["a"], ["b"], ["a"]], array_type, dtype=input_dtype + ) + X_trans = ohe.transform(X_test) + + expected = np.array([[0, 1], [0, 1], [1, 0], [0, 1]]) + assert_allclose(X_trans, expected) + + oe = OrdinalEncoder(categories=categories).fit(X) + X_trans = oe.transform(X_test) + + expected = np.array([[1], [1], [0], [1]]) + assert_array_equal(X_trans, expected) + + +def test_mixed_string_bytes_categoricals(): + """Check that this mixture of predefined categories and X raises an error. + + Categories defined as bytes can not easily be compared to data that is + a string. + """ + # data as unicode + X = np.array([["b"], ["a"]], dtype="U") + # predefined categories as bytes + categories = [np.array(["b", "a"], dtype="S")] + ohe = OneHotEncoder(categories=categories, sparse_output=False) + + msg = re.escape( + "In column 0, the predefined categories have type 'bytes' which is incompatible" + " with values of type 'str_'." + ) + + with pytest.raises(ValueError, match=msg): + ohe.fit(X) + + +@pytest.mark.parametrize("missing_value", [np.nan, None]) +def test_ohe_missing_values_get_feature_names(missing_value): + # encoder with missing values with object dtypes + X = np.array([["a", "b", missing_value, "a", missing_value]], dtype=object).T + ohe = OneHotEncoder(sparse_output=False, handle_unknown="ignore").fit(X) + names = ohe.get_feature_names_out() + assert_array_equal(names, ["x0_a", "x0_b", f"x0_{missing_value}"]) + + +def test_ohe_missing_value_support_pandas(): + # check support for pandas with mixed dtypes and missing values + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "col1": ["dog", "cat", None, "cat"], + "col2": np.array([3, 0, 4, np.nan], dtype=float), + }, + columns=["col1", "col2"], + ) + expected_df_trans = np.array( + [ + [0, 1, 0, 0, 1, 0, 0], + [1, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0, 1], + ] + ) + + Xtr = check_categorical_onehot(df) + assert_allclose(Xtr, expected_df_trans) + + +@pytest.mark.parametrize("handle_unknown", ["infrequent_if_exist", "ignore"]) +@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"]) +def test_ohe_missing_value_support_pandas_categorical(pd_nan_type, handle_unknown): + # checks pandas dataframe with categorical features + pd = pytest.importorskip("pandas") + + pd_missing_value = pd.NA if pd_nan_type == "pd.NA" else np.nan + + df = pd.DataFrame( + { + "col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"), + } + ) + expected_df_trans = np.array( + [ + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [1, 0, 0, 0], + ] + ) + + ohe = OneHotEncoder(sparse_output=False, handle_unknown=handle_unknown) + df_trans = ohe.fit_transform(df) + assert_allclose(expected_df_trans, df_trans) + + assert len(ohe.categories_) == 1 + assert_array_equal(ohe.categories_[0][:-1], ["a", "b", "c"]) + assert np.isnan(ohe.categories_[0][-1]) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_first_handle_unknown_ignore_warns(handle_unknown): + """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist' 
+ during transform.""" + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="first", sparse_output=False, handle_unknown=handle_unknown + ) + X_trans = ohe.fit_transform(X) + + X_expected = np.array( + [ + [0, 0, 0], + [1, 0, 1], + [1, 1, 0], + ] + ) + assert_allclose(X_trans, X_expected) + + # Both categories are unknown + X_test = [["c", 3]] + X_expected = np.array([[0, 0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0, 1\] during " + "transform. These unknown categories will be encoded as all " + "zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + # inverse_transform maps to None + X_inv = ohe.inverse_transform(X_expected) + assert_array_equal(X_inv, np.array([["a", 0]], dtype=object)) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_if_binary_handle_unknown_ignore_warns(handle_unknown): + """Check drop='if_binary' and handle_unknown='ignore' during transform.""" + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="if_binary", sparse_output=False, handle_unknown=handle_unknown + ) + X_trans = ohe.fit_transform(X) + + X_expected = np.array( + [ + [0, 1, 0, 0], + [1, 0, 0, 1], + [1, 0, 1, 0], + ] + ) + assert_allclose(X_trans, X_expected) + + # Both categories are unknown + X_test = [["c", 3]] + X_expected = np.array([[0, 0, 0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0, 1\] during " + "transform. These unknown categories will be encoded as all " + "zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + # inverse_transform maps to None + X_inv = ohe.inverse_transform(X_expected) + assert_array_equal(X_inv, np.array([["a", None]], dtype=object)) + + +@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"]) +def test_ohe_drop_first_explicit_categories(handle_unknown): + """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist' + during fit with categories passed in.""" + + X = [["a", 0], ["b", 2], ["b", 1]] + + ohe = OneHotEncoder( + drop="first", + sparse_output=False, + handle_unknown=handle_unknown, + categories=[["b", "a"], [1, 2]], + ) + ohe.fit(X) + + X_test = [["c", 1]] + X_expected = np.array([[0, 0]]) + + warn_msg = ( + r"Found unknown categories in columns \[0\] during transform. " + r"These unknown categories will be encoded as all zeros" + ) + with pytest.warns(UserWarning, match=warn_msg): + X_trans = ohe.transform(X_test) + assert_allclose(X_trans, X_expected) + + +def test_ohe_more_informative_error_message(): + """Raise informative error message when pandas output and sparse_output=True.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": [1, 2, 3], "b": ["z", "b", "b"]}, columns=["a", "b"]) + + ohe = OneHotEncoder(sparse_output=True) + ohe.set_output(transform="pandas") + + msg = ( + "Pandas output does not support sparse data. 
Set " + "sparse_output=False to output pandas dataframes or disable Pandas output" + ) + with pytest.raises(ValueError, match=msg): + ohe.fit_transform(df) + + ohe.fit(df) + with pytest.raises(ValueError, match=msg): + ohe.transform(df) + + +def test_ordinal_encoder_passthrough_missing_values_float_errors_dtype(): + """Test ordinal encoder with nan passthrough fails when dtype=np.int32.""" + + X = np.array([[np.nan, 3.0, 1.0, 3.0]]).T + oe = OrdinalEncoder(dtype=np.int32) + + msg = ( + r"There are missing values in features \[0\]. For OrdinalEncoder " + f"to encode missing values with dtype: {np.int32}" + ) + with pytest.raises(ValueError, match=msg): + oe.fit(X) + + +@pytest.mark.parametrize("encoded_missing_value", [np.nan, -2]) +def test_ordinal_encoder_passthrough_missing_values_float(encoded_missing_value): + """Test ordinal encoder with nan on float dtypes.""" + + X = np.array([[np.nan, 3.0, 1.0, 3.0]], dtype=np.float64).T + oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(X) + + assert len(oe.categories_) == 1 + + assert_allclose(oe.categories_[0], [1.0, 3.0, np.nan]) + + X_trans = oe.transform(X) + assert_allclose(X_trans, [[encoded_missing_value], [1.0], [0.0], [1.0]]) + + X_inverse = oe.inverse_transform(X_trans) + assert_allclose(X_inverse, X) + + +@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"]) +@pytest.mark.parametrize("encoded_missing_value", [np.nan, -2]) +def test_ordinal_encoder_missing_value_support_pandas_categorical( + pd_nan_type, encoded_missing_value +): + """Check ordinal encoder is compatible with pandas.""" + # checks pandas dataframe with categorical features + pd = pytest.importorskip("pandas") + + pd_missing_value = pd.NA if pd_nan_type == "pd.NA" else np.nan + + df = pd.DataFrame( + { + "col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"), + } + ) + + oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(df) + assert len(oe.categories_) == 1 + assert_array_equal(oe.categories_[0][:3], ["a", "b", "c"]) + assert np.isnan(oe.categories_[0][-1]) + + df_trans = oe.transform(df) + + assert_allclose(df_trans, [[2.0], [0.0], [encoded_missing_value], [1.0], [0.0]]) + + X_inverse = oe.inverse_transform(df_trans) + assert X_inverse.shape == (5, 1) + assert_array_equal(X_inverse[:2, 0], ["c", "a"]) + assert_array_equal(X_inverse[3:, 0], ["b", "a"]) + assert np.isnan(X_inverse[2, 0]) + + +@pytest.mark.parametrize( + "X, X2, cats, cat_dtype", + [ + ( + ( + np.array([["a", np.nan]], dtype=object).T, + np.array([["a", "b"]], dtype=object).T, + [np.array(["a", "d", np.nan], dtype=object)], + np.object_, + ) + ), + ( + ( + np.array([["a", np.nan]], dtype=object).T, + np.array([["a", "b"]], dtype=object).T, + [np.array(["a", "d", np.nan], dtype=object)], + np.object_, + ) + ), + ( + ( + np.array([[2.0, np.nan]], dtype=np.float64).T, + np.array([[3.0]], dtype=np.float64).T, + [np.array([2.0, 4.0, np.nan])], + np.float64, + ) + ), + ], + ids=[ + "object-None-missing-value", + "object-nan-missing_value", + "numeric-missing-value", + ], +) +def test_ordinal_encoder_specified_categories_missing_passthrough( + X, X2, cats, cat_dtype +): + """Test ordinal encoder for specified categories.""" + oe = OrdinalEncoder(categories=cats) + exp = np.array([[0.0], [np.nan]]) + assert_array_equal(oe.fit_transform(X), exp) + # manually specified categories should have same dtype as + # the data when coerced from lists + assert oe.categories_[0].dtype == cat_dtype + + # when specifying categories manually, unknown categories 
should already + # raise when fitting + oe = OrdinalEncoder(categories=cats) + with pytest.raises(ValueError, match="Found unknown categories"): + oe.fit(X2) + + +@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder]) +def test_encoder_duplicate_specified_categories(Encoder): + """Test encoder for specified categories have duplicate values. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27088 + """ + cats = [np.array(["a", "b", "a"], dtype=object)] + enc = Encoder(categories=cats) + X = np.array([["a", "b"]], dtype=object).T + with pytest.raises( + ValueError, match="the predefined categories contain duplicate elements." + ): + enc.fit(X) + + +@pytest.mark.parametrize( + "X, expected_X_trans, X_test", + [ + ( + np.array([[1.0, np.nan, 3.0]]).T, + np.array([[0.0, np.nan, 1.0]]).T, + np.array([[4.0]]), + ), + ( + np.array([[1.0, 4.0, 3.0]]).T, + np.array([[0.0, 2.0, 1.0]]).T, + np.array([[np.nan]]), + ), + ( + np.array([["c", np.nan, "b"]], dtype=object).T, + np.array([[1.0, np.nan, 0.0]]).T, + np.array([["d"]], dtype=object), + ), + ( + np.array([["c", "a", "b"]], dtype=object).T, + np.array([[2.0, 0.0, 1.0]]).T, + np.array([[np.nan]], dtype=object), + ), + ], +) +def test_ordinal_encoder_handle_missing_and_unknown(X, expected_X_trans, X_test): + """Test the interaction between missing values and handle_unknown""" + + oe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1) + + X_trans = oe.fit_transform(X) + assert_allclose(X_trans, expected_X_trans) + + assert_allclose(oe.transform(X_test), [[-1.0]]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_ordinal_encoder_sparse(csr_container): + """Check that we raise proper error with sparse input in OrdinalEncoder. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19878 + """ + X = np.array([[3, 2, 1], [0, 1, 1]]) + X_sparse = csr_container(X) + + encoder = OrdinalEncoder() + + err_msg = "Sparse data was passed, but dense data is required" + with pytest.raises(TypeError, match=err_msg): + encoder.fit(X_sparse) + with pytest.raises(TypeError, match=err_msg): + encoder.fit_transform(X_sparse) + + X_trans = encoder.fit_transform(X) + X_trans_sparse = csr_container(X_trans) + with pytest.raises(TypeError, match=err_msg): + encoder.inverse_transform(X_trans_sparse) + + +def test_ordinal_encoder_fit_with_unseen_category(): + """Check OrdinalEncoder.fit works with unseen category when + `handle_unknown="use_encoded_value"`. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19872 + """ + X = np.array([0, 0, 1, 0, 2, 5])[:, np.newaxis] + oe = OrdinalEncoder( + categories=[[-1, 0, 1]], handle_unknown="use_encoded_value", unknown_value=-999 + ) + oe.fit(X) + + oe = OrdinalEncoder(categories=[[-1, 0, 1]], handle_unknown="error") + with pytest.raises(ValueError, match="Found unknown categories"): + oe.fit(X) + + +@pytest.mark.parametrize( + "X_train", + [ + [["AA", "B"]], + np.array([["AA", "B"]], dtype="O"), + np.array([["AA", "B"]], dtype="U"), + ], +) +@pytest.mark.parametrize( + "X_test", + [ + [["A", "B"]], + np.array([["A", "B"]], dtype="O"), + np.array([["A", "B"]], dtype="U"), + ], +) +def test_ordinal_encoder_handle_unknown_string_dtypes(X_train, X_test): + """Checks that `OrdinalEncoder` transforms string dtypes. 
+ Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19872 + """ + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-9) + enc.fit(X_train) + + X_trans = enc.transform(X_test) + assert_allclose(X_trans, [[-9, 0]]) + + +def test_ordinal_encoder_python_integer(): + """Check that `OrdinalEncoder` accepts Python integers that are potentially + larger than 64 bits. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20721 + """ + X = np.array( + [ + 44253463435747313673, + 9867966753463435747313673, + 44253462342215747313673, + 442534634357764313673, + ] + ).reshape(-1, 1) + encoder = OrdinalEncoder().fit(X) + assert_array_equal(encoder.categories_, np.sort(X, axis=0).T) + X_trans = encoder.transform(X) + assert_array_equal(X_trans, [[0], [3], [2], [1]]) + + +def test_ordinal_encoder_features_names_out_pandas(): + """Check feature names out is same as the input.""" + pd = pytest.importorskip("pandas") + + names = ["b", "c", "a"] + X = pd.DataFrame([[1, 2, 3]], columns=names) + enc = OrdinalEncoder().fit(X) + + feature_names_out = enc.get_feature_names_out() + assert_array_equal(names, feature_names_out) + + +def test_ordinal_encoder_unknown_missing_interaction(): + """Check interactions between encode_unknown and missing value encoding.""" + + X = np.array([["a"], ["b"], [np.nan]], dtype=object) + + oe = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=np.nan, + encoded_missing_value=-3, + ).fit(X) + + X_trans = oe.transform(X) + assert_allclose(X_trans, [[0], [1], [-3]]) + + # "c" is unknown and is mapped to np.nan + # "None" is a missing value and is set to -3 + X_test = np.array([["c"], [np.nan]], dtype=object) + X_test_trans = oe.transform(X_test) + assert_allclose(X_test_trans, [[np.nan], [-3]]) + + # Non-regression test for #24082 + X_roundtrip = oe.inverse_transform(X_test_trans) + + # np.nan is unknown so it maps to None + assert X_roundtrip[0][0] is None + + # -3 is the encoded missing value so it maps back to nan + assert np.isnan(X_roundtrip[1][0]) + + +@pytest.mark.parametrize("with_pandas", [True, False]) +def test_ordinal_encoder_encoded_missing_value_error(with_pandas): + """Check OrdinalEncoder errors when encoded_missing_value is used by + an known category.""" + X = np.array([["a", "dog"], ["b", "cat"], ["c", np.nan]], dtype=object) + + # The 0-th feature has no missing values so it is not included in the list of + # features + error_msg = ( + r"encoded_missing_value \(1\) is already used to encode a known category " + r"in features: " + ) + + if with_pandas: + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X, columns=["letter", "pet"]) + error_msg = error_msg + r"\['pet'\]" + else: + error_msg = error_msg + r"\[1\]" + + oe = OrdinalEncoder(encoded_missing_value=1) + + with pytest.raises(ValueError, match=error_msg): + oe.fit(X) + + +@pytest.mark.parametrize( + "X_train, X_test_trans_expected, X_roundtrip_expected", + [ + ( + # missing value is not in training set + # inverse transform will considering encoded nan as unknown + np.array([["a"], ["1"]], dtype=object), + [[0], [np.nan], [np.nan]], + np.asarray([["1"], [None], [None]], dtype=object), + ), + ( + # missing value in training set, + # inverse transform will considering encoded nan as missing + np.array([[np.nan], ["1"], ["a"]], dtype=object), + [[0], [np.nan], [np.nan]], + np.asarray([["1"], [np.nan], [np.nan]], dtype=object), + ), + ], +) +def test_ordinal_encoder_unknown_missing_interaction_both_nan( + X_train, 
X_test_trans_expected, X_roundtrip_expected +): + """Check transform when unknown_value and encoded_missing_value is nan. + + Non-regression test for #24082. + """ + oe = OrdinalEncoder( + handle_unknown="use_encoded_value", + unknown_value=np.nan, + encoded_missing_value=np.nan, + ).fit(X_train) + + X_test = np.array([["1"], [np.nan], ["b"]]) + X_test_trans = oe.transform(X_test) + + # both nan and unknown are encoded as nan + assert_allclose(X_test_trans, X_test_trans_expected) + X_roundtrip = oe.inverse_transform(X_test_trans) + + n_samples = X_roundtrip_expected.shape[0] + for i in range(n_samples): + expected_val = X_roundtrip_expected[i, 0] + val = X_roundtrip[i, 0] + + if expected_val is None: + assert val is None + elif is_scalar_nan(expected_val): + assert np.isnan(val) + else: + assert val == expected_val + + +def test_one_hot_encoder_set_output(): + """Check OneHotEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + ohe = OneHotEncoder() + + ohe.set_output(transform="pandas") + + match = "Pandas output does not support sparse data. Set sparse_output=False" + with pytest.raises(ValueError, match=match): + ohe.fit_transform(X_df) + + ohe_default = OneHotEncoder(sparse_output=False).set_output(transform="default") + ohe_pandas = OneHotEncoder(sparse_output=False).set_output(transform="pandas") + + X_default = ohe_default.fit_transform(X_df) + X_pandas = ohe_pandas.fit_transform(X_df) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(ohe_pandas.get_feature_names_out(), X_pandas.columns) + + +def test_ordinal_set_output(): + """Check OrdinalEncoder works with set_output.""" + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}) + + ord_default = OrdinalEncoder().set_output(transform="default") + ord_pandas = OrdinalEncoder().set_output(transform="pandas") + + X_default = ord_default.fit_transform(X_df) + X_pandas = ord_pandas.fit_transform(X_df) + + assert_allclose(X_pandas.to_numpy(), X_default) + assert_array_equal(ord_pandas.get_feature_names_out(), X_pandas.columns) + + +def test_predefined_categories_dtype(): + """Check that the categories_ dtype is `object` for string categories + + Regression test for gh-25171. + """ + categories = [["as", "mmas", "eas", "ras", "acs"], ["1", "2"]] + + enc = OneHotEncoder(categories=categories) + + enc.fit([["as", "1"]]) + + assert len(categories) == len(enc.categories_) + for n, cat in enumerate(enc.categories_): + assert cat.dtype == object + assert_array_equal(categories[n], cat) + + +def test_ordinal_encoder_missing_unknown_encoding_max(): + """Check missing value or unknown encoding can equal the cardinality.""" + X = np.array([["dog"], ["cat"], [np.nan]], dtype=object) + X_trans = OrdinalEncoder(encoded_missing_value=2).fit_transform(X) + assert_allclose(X_trans, [[1], [0], [2]]) + + enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=2).fit(X) + X_test = np.array([["snake"]]) + X_trans = enc.transform(X_test) + assert_allclose(X_trans, [[2]]) + + +def test_drop_idx_infrequent_categories(): + """Check drop_idx is defined correctly with infrequent categories. + + Non-regression test for gh-25550. 
+ """ + X = np.array( + [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object + ).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="first").fit(X) + assert_array_equal( + ohe.get_feature_names_out(), ["x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"] + ) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "b" + + X = np.array([["a"] * 2 + ["b"] * 2 + ["c"] * 10], dtype=object).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="if_binary").fit(X) + assert_array_equal(ohe.get_feature_names_out(), ["x0_infrequent_sklearn"]) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "c" + + X = np.array( + [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object + ).T + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=["d"]).fit(X) + assert_array_equal( + ohe.get_feature_names_out(), ["x0_b", "x0_c", "x0_e", "x0_infrequent_sklearn"] + ) + assert ohe.categories_[0][ohe.drop_idx_[0]] == "d" + + ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=None).fit(X) + assert_array_equal( + ohe.get_feature_names_out(), + ["x0_b", "x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"], + ) + assert ohe.drop_idx_ is None + + +@pytest.mark.parametrize( + "kwargs", + [ + {"max_categories": 3}, + {"min_frequency": 6}, + {"min_frequency": 9}, + {"min_frequency": 0.24}, + {"min_frequency": 0.16}, + {"max_categories": 3, "min_frequency": 8}, + {"max_categories": 4, "min_frequency": 6}, + ], +) +def test_ordinal_encoder_infrequent_three_levels(kwargs): + """Test parameters for grouping 'a', and 'd' into the infrequent category.""" + + X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T + ordinal = OrdinalEncoder( + handle_unknown="use_encoded_value", unknown_value=-1, **kwargs + ).fit(X_train) + assert_array_equal(ordinal.categories_, [["a", "b", "c", "d"]]) + assert_array_equal(ordinal.infrequent_categories_, [["a", "d"]]) + + X_test = [["a"], ["b"], ["c"], ["d"], ["z"]] + expected_trans = [[2], [0], [1], [2], [-1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = [ + ["infrequent_sklearn"], + ["b"], + ["c"], + ["infrequent_sklearn"], + [None], + ] + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_three_levels_user_cats(): + """Test that the order of the categories provided by a user is respected. + + In this case 'c' is encoded as the first category and 'b' is encoded + as the second one. 
+ """ + + X_train = np.array( + [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object + ).T + ordinal = OrdinalEncoder( + categories=[["c", "d", "b", "a"]], + max_categories=3, + handle_unknown="use_encoded_value", + unknown_value=-1, + ).fit(X_train) + assert_array_equal(ordinal.categories_, [["c", "d", "b", "a"]]) + assert_array_equal(ordinal.infrequent_categories_, [["d", "a"]]) + + X_test = [["a"], ["b"], ["c"], ["d"], ["z"]] + expected_trans = [[2], [1], [0], [2], [-1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = [ + ["infrequent_sklearn"], + ["b"], + ["c"], + ["infrequent_sklearn"], + [None], + ] + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_mixed(): + """Test when feature 0 has infrequent categories and feature 1 does not.""" + + X = np.column_stack(([0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1])) + + ordinal = OrdinalEncoder(max_categories=3).fit(X) + + assert_array_equal(ordinal.infrequent_categories_[0], [1, 2]) + assert ordinal.infrequent_categories_[1] is None + + X_test = [[3, 0], [1, 1]] + expected_trans = [[1, 0], [2, 1]] + + X_trans = ordinal.transform(X_test) + assert_allclose(X_trans, expected_trans) + + X_inverse = ordinal.inverse_transform(X_trans) + expected_inverse = np.array([[3, 0], ["infrequent_sklearn", 1]], dtype=object) + assert_array_equal(X_inverse, expected_inverse) + + +def test_ordinal_encoder_infrequent_multiple_categories_dtypes(): + """Test infrequent categories with a pandas DataFrame with multiple dtypes.""" + + pd = pytest.importorskip("pandas") + categorical_dtype = pd.CategoricalDtype(["bird", "cat", "dog", "snake"]) + X = pd.DataFrame( + { + "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"], + "int": [5, 3, 0, 10, 10, 12, 0, 3, 5], + "categorical": pd.Series( + ["dog"] * 4 + ["cat"] * 3 + ["snake"] + ["bird"], + dtype=categorical_dtype, + ), + }, + columns=["str", "int", "categorical"], + ) + + ordinal = OrdinalEncoder(max_categories=3).fit(X) + # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be + # considered infrequent because they appear first when sorted + + # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. + # 0, 3, 12 will be considered infrequent because they appear first when + # sorted. 
+
+    # X[:, 2] "snake" and "bird" are infrequent
+
+    assert_array_equal(ordinal.infrequent_categories_[0], ["a", "b"])
+    assert_array_equal(ordinal.infrequent_categories_[1], [0, 3, 12])
+    assert_array_equal(ordinal.infrequent_categories_[2], ["bird", "snake"])
+
+    X_test = pd.DataFrame(
+        {
+            "str": ["a", "b", "f", "c"],
+            "int": [12, 0, 10, 5],
+            "categorical": pd.Series(
+                ["cat"] + ["snake"] + ["bird"] + ["dog"],
+                dtype=categorical_dtype,
+            ),
+        },
+        columns=["str", "int", "categorical"],
+    )
+    expected_trans = [[2, 2, 0], [2, 2, 2], [1, 1, 2], [0, 0, 1]]
+
+    X_trans = ordinal.transform(X_test)
+    assert_allclose(X_trans, expected_trans)
+
+
+def test_ordinal_encoder_infrequent_custom_mapping():
+    """Check behavior of unknown_value and encoded_missing_value with infrequent."""
+    X_train = np.array(
+        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]], dtype=object
+    ).T
+
+    ordinal = OrdinalEncoder(
+        handle_unknown="use_encoded_value",
+        unknown_value=2,
+        max_categories=2,
+        encoded_missing_value=3,
+    ).fit(X_train)
+    assert_array_equal(ordinal.infrequent_categories_, [["a", "c", "d"]])
+
+    X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object)
+    expected_trans = [[1], [0], [1], [1], [2], [3]]
+
+    X_trans = ordinal.transform(X_test)
+    assert_allclose(X_trans, expected_trans)
+
+
+@pytest.mark.parametrize(
+    "kwargs",
+    [
+        {"max_categories": 6},
+        {"min_frequency": 2},
+    ],
+)
+def test_ordinal_encoder_all_frequent(kwargs):
+    """When all categories are considered frequent, the encoding is the same as the
+    default encoder's."""
+    X_train = np.array(
+        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object
+    ).T
+
+    adjusted_encoder = OrdinalEncoder(
+        **kwargs, handle_unknown="use_encoded_value", unknown_value=-1
+    ).fit(X_train)
+    default_encoder = OrdinalEncoder(
+        handle_unknown="use_encoded_value", unknown_value=-1
+    ).fit(X_train)
+
+    X_test = [["a"], ["b"], ["c"], ["d"], ["e"]]
+
+    assert_allclose(
+        adjusted_encoder.transform(X_test), default_encoder.transform(X_test)
+    )
+
+
+@pytest.mark.parametrize(
+    "kwargs",
+    [
+        {"max_categories": 1},
+        {"min_frequency": 100},
+    ],
+)
+def test_ordinal_encoder_all_infrequent(kwargs):
+    """When all categories are infrequent, they are all encoded as zero."""
+    X_train = np.array(
+        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object
+    ).T
+    encoder = OrdinalEncoder(
+        **kwargs, handle_unknown="use_encoded_value", unknown_value=-1
+    ).fit(X_train)
+
+    X_test = [["a"], ["b"], ["c"], ["d"], ["e"]]
+    assert_allclose(encoder.transform(X_test), [[0], [0], [0], [0], [-1]])
+
+
+def test_ordinal_encoder_missing_appears_frequent():
+    """Check behavior when missing value appears frequently."""
+    X = np.array(
+        [[np.nan] * 20 + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"]],
+        dtype=object,
+    ).T
+    ordinal = OrdinalEncoder(max_categories=3).fit(X)
+
+    X_test = np.array([["snake", "cat", "dog", np.nan]], dtype=object).T
+    X_trans = ordinal.transform(X_test)
+    assert_allclose(X_trans, [[2], [0], [1], [np.nan]])
+
+
+def test_ordinal_encoder_missing_appears_infrequent():
+    """Check behavior when missing value appears infrequently."""
+
+    # feature 0 has infrequent categories
+    # feature 1 has no infrequent categories
+    X = np.array(
+        [
+            [np.nan] + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"],
+            ["red"] * 9 + ["green"] * 9,
+        ],
+        dtype=object,
+    ).T
+    ordinal = OrdinalEncoder(min_frequency=4).fit(X)
+
+    X_test = np.array(
+        [
+            ["snake", "red"],
+            ["deer", "green"],
+            [np.nan, "green"],
+            ["dog", "green"],
+            ["cat", "red"],
+        ],
+        dtype=object,
+    )
+    X_trans = ordinal.transform(X_test)
+    assert_allclose(X_trans, [[2, 1], [2, 0], [np.nan, 0], [1, 0], [0, 1]])
+
+
+@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder])
+def test_encoder_not_fitted(Encoder):
+    """Check that we raise a `NotFittedError` when calling transform before fit with
+    the encoders.
+
+    One could expect that passing the `categories` argument to the encoder
+    would make it stateless. However, `fit` performs a couple of checks, such as the
+    position of `np.nan`.
+    """
+    X = np.array([["A"], ["B"], ["C"]], dtype=object)
+    encoder = Encoder(categories=[["A", "B", "C"]])
+    with pytest.raises(NotFittedError):
+        encoder.transform(X)
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py
new file mode 100644
index 0000000000000000000000000000000000000000..cce0ddc5c267eb77ef85b64e5257080d75d09449
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_label.py
@@ -0,0 +1,699 @@
+import numpy as np
+import pytest
+from scipy.sparse import issparse
+
+from sklearn import datasets
+from sklearn.preprocessing._label import (
+    LabelBinarizer,
+    LabelEncoder,
+    MultiLabelBinarizer,
+    _inverse_binarize_multiclass,
+    _inverse_binarize_thresholding,
+    label_binarize,
+)
+from sklearn.utils import _to_object_array
+from sklearn.utils._testing import assert_array_equal, ignore_warnings
+from sklearn.utils.fixes import (
+    COO_CONTAINERS,
+    CSC_CONTAINERS,
+    CSR_CONTAINERS,
+    DOK_CONTAINERS,
+    LIL_CONTAINERS,
+)
+from sklearn.utils.multiclass import type_of_target
+
+iris = datasets.load_iris()
+
+
+def toarray(a):
+    if hasattr(a, "toarray"):
+        a = a.toarray()
+    return a
+
+
+def test_label_binarizer():
+    # one-class case defaults to negative label
+    # For dense case:
+    inp = ["pos", "pos", "pos", "pos"]
+    lb = LabelBinarizer(sparse_output=False)
+    expected = np.array([[0, 0, 0, 0]]).T
+    got = lb.fit_transform(inp)
+    assert_array_equal(lb.classes_, ["pos"])
+    assert_array_equal(expected, got)
+    assert_array_equal(lb.inverse_transform(got), inp)
+
+    # For sparse case:
+    lb = LabelBinarizer(sparse_output=True)
+    got = lb.fit_transform(inp)
+    assert issparse(got)
+    assert_array_equal(lb.classes_, ["pos"])
+    assert_array_equal(expected, got.toarray())
+    assert_array_equal(lb.inverse_transform(got.toarray()), inp)
+
+    lb = LabelBinarizer(sparse_output=False)
+    # two-class case
+    inp = ["neg", "pos", "pos", "neg"]
+    expected = np.array([[0, 1, 1, 0]]).T
+    got = lb.fit_transform(inp)
+    assert_array_equal(lb.classes_, ["neg", "pos"])
+    assert_array_equal(expected, got)
+
+    to_invert = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])
+    assert_array_equal(lb.inverse_transform(to_invert), inp)
+
+    # multi-class case
+    inp = ["spam", "ham", "eggs", "ham", "0"]
+    expected = np.array(
+        [[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]
+    )
+    got = lb.fit_transform(inp)
+    assert_array_equal(lb.classes_, ["0", "eggs", "ham", "spam"])
+    assert_array_equal(expected, got)
+    assert_array_equal(lb.inverse_transform(got), inp)
+
+
+def test_label_binarizer_unseen_labels():
+    lb = LabelBinarizer()
+
+    expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+    got = lb.fit_transform(["b", "d", "e"])
+    assert_array_equal(expected, got)
+
+    expected = np.array(
+        [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]
+    )
+    got = lb.transform(["a", "b", "c", "d",
"e", "f"]) + assert_array_equal(expected, got) + + +def test_label_binarizer_set_label_encoding(): + lb = LabelBinarizer(neg_label=-2, pos_label=0) + + # two-class case with pos_label=0 + inp = np.array([0, 1, 1, 0]) + expected = np.array([[-2, 0, 0, -2]]).T + got = lb.fit_transform(inp) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + lb = LabelBinarizer(neg_label=-2, pos_label=2) + + # multi-class case + inp = np.array([3, 2, 1, 2, 0]) + expected = np.array( + [ + [-2, -2, -2, +2], + [-2, -2, +2, -2], + [-2, +2, -2, -2], + [-2, -2, +2, -2], + [+2, -2, -2, -2], + ] + ) + got = lb.fit_transform(inp) + assert_array_equal(expected, got) + assert_array_equal(lb.inverse_transform(got), inp) + + +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +@pytest.mark.parametrize("unique_first", [True, False]) +def test_label_binarizer_pandas_nullable(dtype, unique_first): + """Checks that LabelBinarizer works with pandas nullable dtypes. + + Non-regression test for gh-25637. + """ + pd = pytest.importorskip("pandas") + + y_true = pd.Series([1, 0, 0, 1, 0, 1, 1, 0, 1], dtype=dtype) + if unique_first: + # Calling unique creates a pandas array which has a different interface + # compared to a pandas Series. Specifically, pandas arrays do not have "iloc". + y_true = y_true.unique() + lb = LabelBinarizer().fit(y_true) + y_out = lb.transform([1, 0]) + + assert_array_equal(y_out, [[1], [0]]) + + +@ignore_warnings +def test_label_binarizer_errors(): + # Check that invalid arguments yield ValueError + one_class = np.array([0, 0, 0, 0]) + lb = LabelBinarizer().fit(one_class) + + multi_label = [(2, 3), (0,), (0, 2)] + err_msg = "You appear to be using a legacy multi-label data representation." + with pytest.raises(ValueError, match=err_msg): + lb.transform(multi_label) + + lb = LabelBinarizer() + err_msg = "This LabelBinarizer instance is not fitted yet" + with pytest.raises(ValueError, match=err_msg): + lb.transform([]) + with pytest.raises(ValueError, match=err_msg): + lb.inverse_transform([]) + + input_labels = [0, 1, 0, 1] + err_msg = "neg_label=2 must be strictly less than pos_label=1." + lb = LabelBinarizer(neg_label=2, pos_label=1) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + err_msg = "neg_label=2 must be strictly less than pos_label=2." 
+ lb = LabelBinarizer(neg_label=2, pos_label=2) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + err_msg = ( + "Sparse binarization is only supported with non zero pos_label and zero " + "neg_label, got pos_label=2 and neg_label=1" + ) + lb = LabelBinarizer(neg_label=1, pos_label=2, sparse_output=True) + with pytest.raises(ValueError, match=err_msg): + lb.fit(input_labels) + + # Sequence of seq type should raise ValueError + y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] + err_msg = "You appear to be using a legacy multi-label data representation" + with pytest.raises(ValueError, match=err_msg): + LabelBinarizer().fit_transform(y_seq_of_seqs) + + # Fail on the dimension of 'binary' + err_msg = "output_type='binary', but y.shape" + with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=np.array([[1, 2, 3], [2, 1, 3]]), + output_type="binary", + classes=[1, 2, 3], + threshold=0, + ) + + # Fail on multioutput data + err_msg = "Multioutput target data is not supported with label binarization" + with pytest.raises(ValueError, match=err_msg): + LabelBinarizer().fit(np.array([[1, 3], [2, 1]])) + with pytest.raises(ValueError, match=err_msg): + label_binarize(np.array([[1, 3], [2, 1]]), classes=[1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_label_binarizer_sparse_errors(csr_container): + # Fail on y_type + err_msg = "foo format is not supported" + with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=csr_container([[1, 2], [2, 1]]), + output_type="foo", + classes=[1, 2], + threshold=0, + ) + + # Fail on the number of classes + err_msg = "The number of class is not equal to the number of dimension of y." + with pytest.raises(ValueError, match=err_msg): + _inverse_binarize_thresholding( + y=csr_container([[1, 2], [2, 1]]), + output_type="foo", + classes=[1, 2, 3], + threshold=0, + ) + + +@pytest.mark.parametrize( + "values, classes, unknown", + [ + ( + np.array([2, 1, 3, 1, 3], dtype="int64"), + np.array([1, 2, 3], dtype="int64"), + np.array([4], dtype="int64"), + ), + ( + np.array(["b", "a", "c", "a", "c"], dtype=object), + np.array(["a", "b", "c"], dtype=object), + np.array(["d"], dtype=object), + ), + ( + np.array(["b", "a", "c", "a", "c"]), + np.array(["a", "b", "c"]), + np.array(["d"]), + ), + ], + ids=["int64", "object", "str"], +) +def test_label_encoder(values, classes, unknown): + # Test LabelEncoder's transform, fit_transform and + # inverse_transform methods + le = LabelEncoder() + le.fit(values) + assert_array_equal(le.classes_, classes) + assert_array_equal(le.transform(values), [1, 0, 2, 0, 2]) + assert_array_equal(le.inverse_transform([1, 0, 2, 0, 2]), values) + le = LabelEncoder() + ret = le.fit_transform(values) + assert_array_equal(ret, [1, 0, 2, 0, 2]) + + with pytest.raises(ValueError, match="unseen labels"): + le.transform(unknown) + + +def test_label_encoder_negative_ints(): + le = LabelEncoder() + le.fit([1, 1, 4, 5, -1, 0]) + assert_array_equal(le.classes_, [-1, 0, 1, 4, 5]) + assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]), [1, 2, 3, 3, 4, 0, 0]) + assert_array_equal( + le.inverse_transform([1, 2, 3, 3, 4, 0, 0]), [0, 1, 4, 4, 5, -1, -1] + ) + with pytest.raises(ValueError): + le.transform([0, 6]) + + +@pytest.mark.parametrize("dtype", ["str", "object"]) +def test_label_encoder_str_bad_shape(dtype): + le = LabelEncoder() + le.fit(np.array(["apple", "orange"], dtype=dtype)) + msg = "should be a 1d array" + with pytest.raises(ValueError, 
match=msg): + le.transform("apple") + + +def test_label_encoder_errors(): + # Check that invalid arguments yield ValueError + le = LabelEncoder() + with pytest.raises(ValueError): + le.transform([]) + with pytest.raises(ValueError): + le.inverse_transform([]) + + # Fail on unseen labels + le = LabelEncoder() + le.fit([1, 2, 3, -1, 1]) + msg = "contains previously unseen labels" + with pytest.raises(ValueError, match=msg): + le.inverse_transform([-2]) + with pytest.raises(ValueError, match=msg): + le.inverse_transform([-2, -3, -4]) + + # Fail on inverse_transform("") + msg = r"should be a 1d array.+shape \(\)" + with pytest.raises(ValueError, match=msg): + le.inverse_transform("") + + +@pytest.mark.parametrize( + "values", + [ + np.array([2, 1, 3, 1, 3], dtype="int64"), + np.array(["b", "a", "c", "a", "c"], dtype=object), + np.array(["b", "a", "c", "a", "c"]), + ], + ids=["int64", "object", "str"], +) +def test_label_encoder_empty_array(values): + le = LabelEncoder() + le.fit(values) + # test empty transform + transformed = le.transform([]) + assert_array_equal(np.array([]), transformed) + # test empty inverse transform + inverse_transformed = le.inverse_transform([]) + assert_array_equal(np.array([]), inverse_transformed) + + +def test_sparse_output_multilabel_binarizer(): + # test input as iterable of iterables + inputs = [ + lambda: [(2, 3), (1,), (1, 2)], + lambda: ({2, 3}, {1}, {1, 2}), + lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + + inverse = inputs[0]() + for sparse_output in [True, False]: + for inp in inputs: + # With fit_transform + mlb = MultiLabelBinarizer(sparse_output=sparse_output) + got = mlb.fit_transform(inp()) + assert issparse(got) == sparse_output + if sparse_output: + # verify CSR assumption that indices and indptr have same dtype + assert got.indices.dtype == got.indptr.dtype + got = got.toarray() + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + # With fit + mlb = MultiLabelBinarizer(sparse_output=sparse_output) + got = mlb.fit(inp()).transform(inp()) + assert issparse(got) == sparse_output + if sparse_output: + # verify CSR assumption that indices and indptr have same dtype + assert got.indices.dtype == got.indptr.dtype + got = got.toarray() + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_output_multilabel_binarizer_errors(csr_container): + inp = iter([iter((2, 3)), iter((1,)), {1, 2}]) + mlb = MultiLabelBinarizer(sparse_output=False) + mlb.fit(inp) + with pytest.raises(ValueError): + mlb.inverse_transform( + csr_container(np.array([[0, 1, 1], [2, 0, 0], [1, 1, 0]])) + ) + + +def test_multilabel_binarizer(): + # test input as iterable of iterables + inputs = [ + lambda: [(2, 3), (1,), (1, 2)], + lambda: ({2, 3}, {1}, {1, 2}), + lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + inverse = inputs[0]() + for inp in inputs: + # With fit_transform + mlb = MultiLabelBinarizer() + got = mlb.fit_transform(inp()) + assert_array_equal(indicator_mat, got) + assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + # With fit + mlb = MultiLabelBinarizer() + got = mlb.fit(inp()).transform(inp()) + assert_array_equal(indicator_mat, got) + 
assert_array_equal([1, 2, 3], mlb.classes_) + assert mlb.inverse_transform(got) == inverse + + +def test_multilabel_binarizer_empty_sample(): + mlb = MultiLabelBinarizer() + y = [[1, 2], [1], []] + Y = np.array([[1, 1], [1, 0], [0, 0]]) + assert_array_equal(mlb.fit_transform(y), Y) + + +def test_multilabel_binarizer_unknown_class(): + mlb = MultiLabelBinarizer() + y = [[1, 2]] + Y = np.array([[1, 0], [0, 1]]) + warning_message = "unknown class.* will be ignored" + with pytest.warns(UserWarning, match=warning_message): + matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) + + Y = np.array([[1, 0, 0], [0, 1, 0]]) + mlb = MultiLabelBinarizer(classes=[1, 2, 3]) + with pytest.warns(UserWarning, match=warning_message): + matrix = mlb.fit(y).transform([[4, 1], [2, 0]]) + assert_array_equal(matrix, Y) + + +def test_multilabel_binarizer_given_classes(): + inp = [(2, 3), (1,), (1, 2)] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) + # fit_transform() + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, [1, 3, 2]) + + # fit().transform() + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, [1, 3, 2]) + + # ensure works with extra class + mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2]) + assert_array_equal( + mlb.fit_transform(inp), np.hstack(([[0], [0], [0]], indicator_mat)) + ) + assert_array_equal(mlb.classes_, [4, 1, 3, 2]) + + # ensure fit is no-op as iterable is not consumed + inp = iter(inp) + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + + # ensure a ValueError is thrown if given duplicate classes + err_msg = ( + "The classes argument contains duplicate classes. Remove " + "these duplicates before passing them to MultiLabelBinarizer." 
+ ) + mlb = MultiLabelBinarizer(classes=[1, 3, 2, 3]) + with pytest.raises(ValueError, match=err_msg): + mlb.fit(inp) + + +def test_multilabel_binarizer_multiple_calls(): + inp = [(2, 3), (1,), (1, 2)] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 1]]) + + indicator_mat2 = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + + # first call + mlb = MultiLabelBinarizer(classes=[1, 3, 2]) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + # second call change class + mlb.classes = [1, 2, 3] + assert_array_equal(mlb.fit_transform(inp), indicator_mat2) + + +def test_multilabel_binarizer_same_length_sequence(): + # Ensure sequences of the same length are not interpreted as a 2-d array + inp = [[1], [0], [2]] + indicator_mat = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) + # fit_transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.inverse_transform(indicator_mat), inp) + + # fit().transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.inverse_transform(indicator_mat), inp) + + +def test_multilabel_binarizer_non_integer_labels(): + tuple_classes = _to_object_array([(1,), (2,), (3,)]) + inputs = [ + ([("2", "3"), ("1",), ("1", "2")], ["1", "2", "3"]), + ([("b", "c"), ("a",), ("a", "b")], ["a", "b", "c"]), + ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes), + ] + indicator_mat = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 0]]) + for inp, classes in inputs: + # fit_transform() + mlb = MultiLabelBinarizer() + inp = np.array(inp, dtype=object) + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, classes) + indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), dtype=object) + assert_array_equal(indicator_mat_inv, inp) + + # fit().transform() + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat) + assert_array_equal(mlb.classes_, classes) + indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat), dtype=object) + assert_array_equal(indicator_mat_inv, inp) + + mlb = MultiLabelBinarizer() + with pytest.raises(TypeError): + mlb.fit_transform([({}), ({}, {"a": "b"})]) + + +def test_multilabel_binarizer_non_unique(): + inp = [(1, 1, 1, 0)] + indicator_mat = np.array([[1, 1]]) + mlb = MultiLabelBinarizer() + assert_array_equal(mlb.fit_transform(inp), indicator_mat) + + +def test_multilabel_binarizer_inverse_validation(): + inp = [(1, 1, 1, 0)] + mlb = MultiLabelBinarizer() + mlb.fit_transform(inp) + # Not binary + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1, 3]])) + # The following binary cases are fine, however + mlb.inverse_transform(np.array([[0, 0]])) + mlb.inverse_transform(np.array([[1, 1]])) + mlb.inverse_transform(np.array([[1, 0]])) + + # Wrong shape + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1]])) + with pytest.raises(ValueError): + mlb.inverse_transform(np.array([[1, 1, 1]])) + + +def test_label_binarize_with_class_order(): + out = label_binarize([1, 6], classes=[1, 2, 4, 6]) + expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]]) + assert_array_equal(out, expected) + + # Modified class order + out = label_binarize([1, 6], classes=[1, 6, 4, 2]) + expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]]) + assert_array_equal(out, expected) + + out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]) + expected = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]) + 
assert_array_equal(out, expected) + + +def check_binarized_results(y, classes, pos_label, neg_label, expected): + for sparse_output in [True, False]: + if (pos_label == 0 or neg_label != 0) and sparse_output: + with pytest.raises(ValueError): + label_binarize( + y, + classes=classes, + neg_label=neg_label, + pos_label=pos_label, + sparse_output=sparse_output, + ) + continue + + # check label_binarize + binarized = label_binarize( + y, + classes=classes, + neg_label=neg_label, + pos_label=pos_label, + sparse_output=sparse_output, + ) + assert_array_equal(toarray(binarized), expected) + assert issparse(binarized) == sparse_output + + # check inverse + y_type = type_of_target(y) + if y_type == "multiclass": + inversed = _inverse_binarize_multiclass(binarized, classes=classes) + + else: + inversed = _inverse_binarize_thresholding( + binarized, + output_type=y_type, + classes=classes, + threshold=((neg_label + pos_label) / 2.0), + ) + + assert_array_equal(toarray(inversed), toarray(y)) + + # Check label binarizer + lb = LabelBinarizer( + neg_label=neg_label, pos_label=pos_label, sparse_output=sparse_output + ) + binarized = lb.fit_transform(y) + assert_array_equal(toarray(binarized), expected) + assert issparse(binarized) == sparse_output + inverse_output = lb.inverse_transform(binarized) + assert_array_equal(toarray(inverse_output), toarray(y)) + assert issparse(inverse_output) == issparse(y) + + +def test_label_binarize_binary(): + y = [0, 1, 0] + classes = [0, 1] + pos_label = 2 + neg_label = -1 + expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1)) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + # Binary case where sparse_output = True will not result in a ValueError + y = [0, 1, 0] + classes = [0, 1] + pos_label = 3 + neg_label = 0 + expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1)) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + +def test_label_binarize_multiclass(): + y = [0, 1, 2] + classes = [0, 1, 2] + pos_label = 2 + neg_label = 0 + expected = 2 * np.eye(3) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + with pytest.raises(ValueError): + label_binarize( + y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True + ) + + +@pytest.mark.parametrize( + "arr_type", + [np.array] + + COO_CONTAINERS + + CSC_CONTAINERS + + CSR_CONTAINERS + + DOK_CONTAINERS + + LIL_CONTAINERS, +) +def test_label_binarize_multilabel(arr_type): + y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]]) + classes = [0, 1, 2] + pos_label = 2 + neg_label = 0 + expected = pos_label * y_ind + y = arr_type(y_ind) + + check_binarized_results(y, classes, pos_label, neg_label, expected) + + with pytest.raises(ValueError): + label_binarize( + y, classes=classes, neg_label=-1, pos_label=pos_label, sparse_output=True + ) + + +def test_invalid_input_label_binarize(): + with pytest.raises(ValueError): + label_binarize([0, 2], classes=[0, 2], pos_label=0, neg_label=1) + with pytest.raises(ValueError, match="continuous target data is not "): + label_binarize([1.2, 2.7], classes=[0, 1]) + with pytest.raises(ValueError, match="mismatch with the labels"): + label_binarize([[1, 3]], classes=[1, 2, 3]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_inverse_binarize_multiclass(csr_container): + got = _inverse_binarize_multiclass( + csr_container([[0, 1, 0], [-1, 0, -1], [0, 0, 0]]), np.arange(3) + ) + assert_array_equal(got, np.array([1, 1, 0])) + + +def 
test_nan_label_encoder(): + """Check that label encoder encodes nans in transform. + + Non-regression test for #22628. + """ + le = LabelEncoder() + le.fit(["a", "a", "b", np.nan]) + + y_trans = le.transform([np.nan]) + assert_array_equal(y_trans, [2]) + + +@pytest.mark.parametrize( + "encoder", [LabelEncoder(), LabelBinarizer(), MultiLabelBinarizer()] +) +def test_label_encoders_do_not_have_set_output(encoder): + """Check that label encoders do not define set_output and work with y as a kwarg. + + Non-regression test for #26854. + """ + assert not hasattr(encoder, "set_output") + y_encoded_with_kwarg = encoder.fit_transform(y=["a", "b", "c"]) + y_encoded_positional = encoder.fit_transform(["a", "b", "c"]) + assert_array_equal(y_encoded_with_kwarg, y_encoded_positional)
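As a quick orientation for readers of this patch, the short standalone sketch below is not part of the vendored test files; it assumes scikit-learn >= 1.2 (where `set_output` exists) and only illustrates the behaviour the final test asserts: label encoders operate directly on a target `y` and do not expose `set_output`, whereas feature transformers such as `OrdinalEncoder` do.

# Illustrative sketch only -- not part of the patched files above.
import numpy as np
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder

y = ["cat", "dog", "cat", "bird"]

# Label encoders take the target directly and have no set_output method.
le = LabelEncoder()
print(le.fit_transform(y))        # [1 2 1 0]; classes_ are sorted alphabetically
print(hasattr(le, "set_output"))  # False

# Feature transformers expect a 2D X and do support the set_output API.
X = np.asarray(y).reshape(-1, 1)
oe = OrdinalEncoder().set_output(transform="default")
print(oe.fit_transform(X).ravel())  # [1. 2. 1. 0.]
print(hasattr(oe, "set_output"))    # True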