diff --git a/ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..cd004b57e8350cf9acef436f4ada8ae5c8a52f60 --- /dev/null +++ b/ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9689eb1c74eddf479143f88377890aaf72609025da1d42e38e1c3341dfdd31b +size 33555612 diff --git a/ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..79ccf3e69fb5bc298a35bbc5d3d5c999e8930163 --- /dev/null +++ b/ckpts/universal/global_step40/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa4244869979212bc62f6130b03a693e182770b2cb2233a3e9b027d769daaefc +size 33555627 diff --git a/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..9457e38d5acd778cabf70a721d295b47db3aca47 --- /dev/null +++ b/ckpts/universal/global_step40/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:445d543dfa522ec41120f21a75f0773cdb130f313f3a7855b4c98eb1d64e3c7a +size 9387 diff --git a/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..55b7cee9d659b8b34f4cbf66e4eca03fc1bf1a44 --- /dev/null +++ b/ckpts/universal/global_step40/zero/18.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df24ce7c264f4ab09f898921658c8a326f43eda7fa3d449e3254fd6907973604 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/21.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/21.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..35a6aea20e386860aceedb0a0e071794e99136a5 --- /dev/null +++ b/ckpts/universal/global_step40/zero/21.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ac18b29045cef3697c1b6e55206eb56999cea21767bfd0526373ab86f73271a +size 50332749 diff --git a/ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..f33ffc4d1a9299362553fc085f47974803c0ebe9 --- /dev/null +++ b/ckpts/universal/global_step40/zero/25.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dc1337163ce023c2f71e491e66aa73a14dfbd46435f6ae0f03a8ec5c0e9b7d0 +size 16778411 diff --git a/ckpts/universal/global_step40/zero/25.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/25.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..2433d4c048d9a3aac3edc177eca9ff7febf7cb29 --- /dev/null +++ b/ckpts/universal/global_step40/zero/25.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d0a311c07cdcd005b77c2a71a818f6d585a7779a10cb045bbe91e6f62c5cd6cb +size 16778317 diff --git a/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..081d92f6ce0f2295c47fde068352db3bc75adcfe --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e014baa072b1dc520dca57e239d50b32356e6245697cbba503b3911d071c5bf +size 33555612 diff --git a/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..d4518ca47fc4ec5dbd350652976c3ab1b817d3fb --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72bf3dee9375a038147a1b688ed1b6779744207217d8e76a0e09a8f3028212db +size 33555627 diff --git a/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..06a7deb51d07434cecce1bf360a1717a9ce5d360 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34e9e7ca6e93becfe7bd651e1a2265dbc9d6110bb1375aae72f8d1c0f73677b9 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_base.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6df0f2030877e3b548decb2dc8cd5a4ef6ef31c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_base.py @@ -0,0 +1,1387 @@ +"""Base and mixin classes for nearest neighbors.""" +# Authors: Jake Vanderplas +# Fabian Pedregosa +# Alexandre Gramfort +# Sparseness support by Lars Buitinck +# Multi-output support by Arnaud Joly +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam +import itertools +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from functools import partial +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy.sparse import csr_matrix, issparse + +from ..base import BaseEstimator, MultiOutputMixin, is_classifier +from ..exceptions import DataConversionWarning, EfficiencyWarning +from ..metrics import DistanceMetric, pairwise_distances_chunked +from ..metrics._pairwise_distances_reduction import ( + ArgKmin, + RadiusNeighbors, +) +from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS +from ..utils import ( + _to_object_array, + check_array, + gen_even_slices, +) +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.fixes import parse_version, sp_base_version +from ..utils.multiclass import check_classification_targets +from ..utils.parallel import Parallel, delayed +from ..utils.validation import check_is_fitted, check_non_negative +from ._ball_tree import BallTree +from ._kd_tree import KDTree + +SCIPY_METRICS = [ + "braycurtis", + "canberra", + "chebyshev", + "correlation", + "cosine", + "dice", + "hamming", + "jaccard", + "mahalanobis", + "minkowski", + "rogerstanimoto", + 
"russellrao", + "seuclidean", + "sokalmichener", + "sokalsneath", + "sqeuclidean", + "yule", +] +if sp_base_version < parse_version("1.11"): + # Deprecated in SciPy 1.9 and removed in SciPy 1.11 + SCIPY_METRICS += ["kulsinski"] +if sp_base_version < parse_version("1.9"): + # Deprecated in SciPy 1.0 and removed in SciPy 1.9 + SCIPY_METRICS += ["matching"] + +VALID_METRICS = dict( + ball_tree=BallTree.valid_metrics, + kd_tree=KDTree.valid_metrics, + # The following list comes from the + # sklearn.metrics.pairwise doc string + brute=sorted(set(PAIRWISE_DISTANCE_FUNCTIONS).union(SCIPY_METRICS)), +) + +VALID_METRICS_SPARSE = dict( + ball_tree=[], + kd_tree=[], + brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {"haversine", "nan_euclidean"}), +) + + +def _get_weights(dist, weights): + """Get the weights from an array of distances and a parameter ``weights``. + + Assume weights have already been validated. + + Parameters + ---------- + dist : ndarray + The input distances. + + weights : {'uniform', 'distance'}, callable or None + The kind of weighting used. + + Returns + ------- + weights_arr : array of the same shape as ``dist`` + If ``weights == 'uniform'``, then returns None. + """ + if weights in (None, "uniform"): + return None + + if weights == "distance": + # if user attempts to classify a point that was zero distance from one + # or more training points, those training points are weighted as 1.0 + # and the other points as 0.0 + if dist.dtype is np.dtype(object): + for point_dist_i, point_dist in enumerate(dist): + # check if point_dist is iterable + # (ex: RadiusNeighborClassifier.predict may set an element of + # dist to 1e-6 to represent an 'outlier') + if hasattr(point_dist, "__contains__") and 0.0 in point_dist: + dist[point_dist_i] = point_dist == 0.0 + else: + dist[point_dist_i] = 1.0 / point_dist + else: + with np.errstate(divide="ignore"): + dist = 1.0 / dist + inf_mask = np.isinf(dist) + inf_row = np.any(inf_mask, axis=1) + dist[inf_row] = inf_mask[inf_row] + return dist + + if callable(weights): + return weights(dist) + + +def _is_sorted_by_data(graph): + """Return whether the graph's non-zero entries are sorted by data. + + The non-zero entries are stored in graph.data and graph.indices. + For each row (or sample), the non-zero entries can be either: + - sorted by indices, as after graph.sort_indices(); + - sorted by data, as after _check_precomputed(graph); + - not sorted. + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Neighbors graph as given by `kneighbors_graph` or + `radius_neighbors_graph`. Matrix should be of format CSR format. + + Returns + ------- + res : bool + Whether input graph is sorted by data. + """ + assert graph.format == "csr" + out_of_order = graph.data[:-1] > graph.data[1:] + line_change = np.unique(graph.indptr[1:-1] - 1) + line_change = line_change[line_change < out_of_order.shape[0]] + return out_of_order.sum() == out_of_order[line_change].sum() + + +def _check_precomputed(X): + """Check precomputed distance matrix. + + If the precomputed distance matrix is sparse, it checks that the non-zero + entries are sorted by distances. If not, the matrix is copied and sorted. + + Parameters + ---------- + X : {sparse matrix, array-like}, (n_samples, n_samples) + Distance matrix to other samples. X may be a sparse matrix, in which + case only non-zero elements may be considered neighbors. + + Returns + ------- + X : {sparse matrix, array-like}, (n_samples, n_samples) + Distance matrix to other samples. 
X may be a sparse matrix, in which + case only non-zero elements may be considered neighbors. + """ + if not issparse(X): + X = check_array(X) + check_non_negative(X, whom="precomputed distance matrix.") + return X + else: + graph = X + + if graph.format not in ("csr", "csc", "coo", "lil"): + raise TypeError( + "Sparse matrix in {!r} format is not supported due to " + "its handling of explicit zeros".format(graph.format) + ) + copied = graph.format != "csr" + graph = check_array(graph, accept_sparse="csr") + check_non_negative(graph, whom="precomputed distance matrix.") + graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True) + + return graph + + +@validate_params( + { + "graph": ["sparse matrix"], + "copy": ["boolean"], + "warn_when_not_sorted": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def sort_graph_by_row_values(graph, copy=False, warn_when_not_sorted=True): + """Sort a sparse graph such that each row is stored with increasing values. + + .. versionadded:: 1.2 + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Distance matrix to other samples, where only non-zero elements are + considered neighbors. Matrix is converted to CSR format if not already. + + copy : bool, default=False + If True, the graph is copied before sorting. If False, the sorting is + performed inplace. If the graph is not of CSR format, `copy` must be + True to allow the conversion to CSR format, otherwise an error is + raised. + + warn_when_not_sorted : bool, default=True + If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised + when the input graph is not sorted by row values. + + Returns + ------- + graph : sparse matrix of shape (n_samples, n_samples) + Distance matrix to other samples, where only non-zero elements are + considered neighbors. Matrix is in CSR format. + + Examples + -------- + >>> from scipy.sparse import csr_matrix + >>> from sklearn.neighbors import sort_graph_by_row_values + >>> X = csr_matrix( + ... [[0., 3., 1.], + ... [3., 0., 2.], + ... [1., 2., 0.]]) + >>> X.data + array([3., 1., 3., 2., 1., 2.]) + >>> X_ = sort_graph_by_row_values(X) + >>> X_.data + array([1., 3., 2., 3., 1., 2.]) + """ + if graph.format == "csr" and _is_sorted_by_data(graph): + return graph + + if warn_when_not_sorted: + warnings.warn( + ( + "Precomputed sparse input was not sorted by row values. Use the" + " function sklearn.neighbors.sort_graph_by_row_values to sort the input" + " by row values, with warn_when_not_sorted=False to remove this" + " warning." + ), + EfficiencyWarning, + ) + + if graph.format not in ("csr", "csc", "coo", "lil"): + raise TypeError( + f"Sparse matrix in {graph.format!r} format is not supported due to " + "its handling of explicit zeros" + ) + elif graph.format != "csr": + if not copy: + raise ValueError( + "The input graph is not in CSR format. Use copy=True to allow " + "the conversion to CSR format." 
+ ) + graph = graph.asformat("csr") + elif copy: # csr format with copy=True + graph = graph.copy() + + row_nnz = np.diff(graph.indptr) + if row_nnz.max() == row_nnz.min(): + # if each sample has the same number of provided neighbors + n_samples = graph.shape[0] + distances = graph.data.reshape(n_samples, -1) + + order = np.argsort(distances, kind="mergesort") + order += np.arange(n_samples)[:, None] * row_nnz[0] + order = order.ravel() + graph.data = graph.data[order] + graph.indices = graph.indices[order] + + else: + for start, stop in zip(graph.indptr, graph.indptr[1:]): + order = np.argsort(graph.data[start:stop], kind="mergesort") + graph.data[start:stop] = graph.data[start:stop][order] + graph.indices[start:stop] = graph.indices[start:stop][order] + + return graph + + +def _kneighbors_from_graph(graph, n_neighbors, return_distance): + """Decompose a nearest neighbors sparse graph into distances and indices. + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Neighbors graph as given by `kneighbors_graph` or + `radius_neighbors_graph`. Matrix should be in CSR format. + + n_neighbors : int + Number of neighbors required for each sample. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + neigh_dist : ndarray of shape (n_samples, n_neighbors) + Distances to nearest neighbors. Only present if `return_distance=True`. + + neigh_ind : ndarray of shape (n_samples, n_neighbors) + Indices of nearest neighbors. + """ + n_samples = graph.shape[0] + assert graph.format == "csr" + + # number of neighbors per sample + row_nnz = np.diff(graph.indptr) + row_nnz_min = row_nnz.min() + if n_neighbors is not None and row_nnz_min < n_neighbors: + raise ValueError( + "%d neighbors per sample are required, but some samples have only" + " %d neighbors in precomputed graph matrix. Decrease number of " + "neighbors used or recompute the graph with more neighbors." + % (n_neighbors, row_nnz_min) + ) + + def extract(a): + # if each sample has the same number of provided neighbors + if row_nnz.max() == row_nnz_min: + return a.reshape(n_samples, -1)[:, :n_neighbors] + else: + idx = np.tile(np.arange(n_neighbors), (n_samples, 1)) + idx += graph.indptr[:-1, None] + return a.take(idx, mode="clip").reshape(n_samples, n_neighbors) + + if return_distance: + return extract(graph.data), extract(graph.indices) + else: + return extract(graph.indices) + + +def _radius_neighbors_from_graph(graph, radius, return_distance): + """Decompose a nearest neighbors sparse graph into distances and indices. + + Parameters + ---------- + graph : sparse matrix of shape (n_samples, n_samples) + Neighbors graph as given by `kneighbors_graph` or + `radius_neighbors_graph`. Matrix should be in CSR format. + + radius : float + Radius of neighborhoods which should be strictly positive. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + neigh_dist : ndarray of shape (n_samples,) of arrays + Distances to nearest neighbors. Only present if `return_distance=True`. + + neigh_ind : ndarray of shape (n_samples,) of arrays + Indices of nearest neighbors.
+ """ + assert graph.format == "csr" + + no_filter_needed = bool(graph.data.max() <= radius) + + if no_filter_needed: + data, indices, indptr = graph.data, graph.indices, graph.indptr + else: + mask = graph.data <= radius + if return_distance: + data = np.compress(mask, graph.data) + indices = np.compress(mask, graph.indices) + indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr] + + indices = indices.astype(np.intp, copy=no_filter_needed) + + if return_distance: + neigh_dist = _to_object_array(np.split(data, indptr[1:-1])) + neigh_ind = _to_object_array(np.split(indices, indptr[1:-1])) + + if return_distance: + return neigh_dist, neigh_ind + else: + return neigh_ind + + +class NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for nearest neighbors estimators.""" + + _parameter_constraints: dict = { + "n_neighbors": [Interval(Integral, 1, None, closed="left"), None], + "radius": [Interval(Real, 0, None, closed="both"), None], + "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})], + "leaf_size": [Interval(Integral, 1, None, closed="left")], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "metric_params": [dict, None], + "n_jobs": [Integral, None], + } + + @abstractmethod + def __init__( + self, + n_neighbors=None, + radius=None, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + self.n_neighbors = n_neighbors + self.radius = radius + self.algorithm = algorithm + self.leaf_size = leaf_size + self.metric = metric + self.metric_params = metric_params + self.p = p + self.n_jobs = n_jobs + + def _check_algorithm_metric(self): + if self.algorithm == "auto": + if self.metric == "precomputed": + alg_check = "brute" + elif ( + callable(self.metric) + or self.metric in VALID_METRICS["ball_tree"] + or isinstance(self.metric, DistanceMetric) + ): + alg_check = "ball_tree" + else: + alg_check = "brute" + else: + alg_check = self.algorithm + + if callable(self.metric): + if self.algorithm == "kd_tree": + # callable metric is only valid for brute force and ball_tree + raise ValueError( + "kd_tree does not support callable metric '%s'" + "Function call overhead will result" + "in very poor performance." + % self.metric + ) + elif self.metric not in VALID_METRICS[alg_check] and not isinstance( + self.metric, DistanceMetric + ): + raise ValueError( + "Metric '%s' not valid. Use " + "sorted(sklearn.neighbors.VALID_METRICS['%s']) " + "to get valid options. " + "Metric can also be a callable function." % (self.metric, alg_check) + ) + + if self.metric_params is not None and "p" in self.metric_params: + if self.p is not None: + warnings.warn( + ( + "Parameter p is found in metric_params. " + "The corresponding parameter from __init__ " + "is ignored." + ), + SyntaxWarning, + stacklevel=3, + ) + + def _fit(self, X, y=None): + if self._get_tags()["requires_y"]: + if not isinstance(X, (KDTree, BallTree, NeighborsBase)): + X, y = self._validate_data( + X, y, accept_sparse="csr", multi_output=True, order="C" + ) + + if is_classifier(self): + # Classification targets require a specific format + if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1: + if y.ndim != 1: + warnings.warn( + ( + "A column-vector y was passed when a " + "1d array was expected. Please change " + "the shape of y to (n_samples,), for " + "example using ravel()." 
+ ), + DataConversionWarning, + stacklevel=2, + ) + + self.outputs_2d_ = False + y = y.reshape((-1, 1)) + else: + self.outputs_2d_ = True + + check_classification_targets(y) + self.classes_ = [] + # Using `dtype=np.intp` is necessary since `np.bincount` + # (called in _classification.py) fails when dealing + # with a float64 array on 32bit systems. + self._y = np.empty(y.shape, dtype=np.intp) + for k in range(self._y.shape[1]): + classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True) + self.classes_.append(classes) + + if not self.outputs_2d_: + self.classes_ = self.classes_[0] + self._y = self._y.ravel() + else: + self._y = y + + else: + if not isinstance(X, (KDTree, BallTree, NeighborsBase)): + X = self._validate_data(X, accept_sparse="csr", order="C") + + self._check_algorithm_metric() + if self.metric_params is None: + self.effective_metric_params_ = {} + else: + self.effective_metric_params_ = self.metric_params.copy() + + effective_p = self.effective_metric_params_.get("p", self.p) + if self.metric == "minkowski": + self.effective_metric_params_["p"] = effective_p + + self.effective_metric_ = self.metric + # For minkowski distance, use more efficient methods where available + if self.metric == "minkowski": + p = self.effective_metric_params_.pop("p", 2) + w = self.effective_metric_params_.pop("w", None) + + if p == 1 and w is None: + self.effective_metric_ = "manhattan" + elif p == 2 and w is None: + self.effective_metric_ = "euclidean" + elif p == np.inf and w is None: + self.effective_metric_ = "chebyshev" + else: + # Use the generic minkowski metric, possibly weighted. + self.effective_metric_params_["p"] = p + self.effective_metric_params_["w"] = w + + if isinstance(X, NeighborsBase): + self._fit_X = X._fit_X + self._tree = X._tree + self._fit_method = X._fit_method + self.n_samples_fit_ = X.n_samples_fit_ + return self + + elif isinstance(X, BallTree): + self._fit_X = X.data + self._tree = X + self._fit_method = "ball_tree" + self.n_samples_fit_ = X.data.shape[0] + return self + + elif isinstance(X, KDTree): + self._fit_X = X.data + self._tree = X + self._fit_method = "kd_tree" + self.n_samples_fit_ = X.data.shape[0] + return self + + if self.metric == "precomputed": + X = _check_precomputed(X) + # Precomputed matrix X must be square + if X.shape[0] != X.shape[1]: + raise ValueError( + "Precomputed matrix must be square." + " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1]) + ) + self.n_features_in_ = X.shape[1] + + n_samples = X.shape[0] + if n_samples == 0: + raise ValueError("n_samples must be greater than 0") + + if issparse(X): + if self.algorithm not in ("auto", "brute"): + warnings.warn("cannot use tree with sparse input: using brute force") + + if ( + self.effective_metric_ not in VALID_METRICS_SPARSE["brute"] + and not callable(self.effective_metric_) + and not isinstance(self.effective_metric_, DistanceMetric) + ): + raise ValueError( + "Metric '%s' not valid for sparse input. " + "Use sorted(sklearn.neighbors." + "VALID_METRICS_SPARSE['brute']) " + "to get valid options. " + "Metric can also be a callable function."
% (self.effective_metric_) + ) + self._fit_X = X.copy() + self._tree = None + self._fit_method = "brute" + self.n_samples_fit_ = X.shape[0] + return self + + self._fit_method = self.algorithm + self._fit_X = X + self.n_samples_fit_ = X.shape[0] + + if self._fit_method == "auto": + # A tree approach is better for a small number of neighbors or a small + # number of features, with KDTree generally faster when available + if ( + self.metric == "precomputed" + or self._fit_X.shape[1] > 15 + or ( + self.n_neighbors is not None + and self.n_neighbors >= self._fit_X.shape[0] // 2 + ) + ): + self._fit_method = "brute" + else: + if ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_["p"] < 1 + ): + self._fit_method = "brute" + elif ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_.get("w") is not None + ): + # 'minkowski' with weights is not supported by KDTree but is + # supported by BallTree. + self._fit_method = "ball_tree" + elif self.effective_metric_ in VALID_METRICS["kd_tree"]: + self._fit_method = "kd_tree" + elif ( + callable(self.effective_metric_) + or self.effective_metric_ in VALID_METRICS["ball_tree"] + ): + self._fit_method = "ball_tree" + else: + self._fit_method = "brute" + + if ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_["p"] < 1 + ): + # For 0 < p < 1 Minkowski distances aren't valid distance + # metrics as they do not satisfy the triangle inequality: + # they are semi-metrics. + # algorithm="kd_tree" and algorithm="ball_tree" can't be used because + # KDTree and BallTree require a proper distance metric to work properly. + # However, the brute-force algorithm supports semi-metrics. + if self._fit_method == "brute": + warnings.warn( + "Mind that for 0 < p < 1, Minkowski metrics are not distance" + " metrics. Continuing the execution with `algorithm='brute'`." + ) + else: # self._fit_method in ("kd_tree", "ball_tree") + raise ValueError( + f'algorithm="{self._fit_method}" does not support 0 < p < 1 for ' + "the Minkowski metric. To resolve this problem either " + 'set p >= 1 or algorithm="brute".' + ) + + if self._fit_method == "ball_tree": + self._tree = BallTree( + X, + self.leaf_size, + metric=self.effective_metric_, + **self.effective_metric_params_, + ) + elif self._fit_method == "kd_tree": + if ( + self.effective_metric_ == "minkowski" + and self.effective_metric_params_.get("w") is not None + ): + raise ValueError( + "algorithm='kd_tree' is not valid for " + "metric='minkowski' with a weight parameter 'w': " + "try algorithm='ball_tree' " + "or algorithm='brute' instead." + ) + self._tree = KDTree( + X, + self.leaf_size, + metric=self.effective_metric_, + **self.effective_metric_params_, + ) + elif self._fit_method == "brute": + self._tree = None + + return self + + def _more_tags(self): + # For cross-validation routines to split data correctly + return {"pairwise": self.metric == "precomputed"} + + +def _tree_query_parallel_helper(tree, *args, **kwargs): + """Helper for the Parallel calls in KNeighborsMixin.kneighbors. + + The Cython method tree.query is not directly picklable by cloudpickle + under PyPy. + """ + return tree.query(*args, **kwargs) + + +class KNeighborsMixin: + """Mixin for k-neighbors searches.""" + + def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance): + """Reduce a chunk of distances to the nearest neighbors.
+ + Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` + + Parameters + ---------- + dist : ndarray of shape (n_samples_chunk, n_samples) + The distance matrix. + + start : int + The index in X which the first row of dist corresponds to. + + n_neighbors : int + Number of neighbors required for each sample. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + dist : array of shape (n_samples_chunk, n_neighbors) + Returned only if `return_distance=True`. + + neigh : array of shape (n_samples_chunk, n_neighbors) + The neighbors indices. + """ + sample_range = np.arange(dist.shape[0])[:, None] + neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1) + neigh_ind = neigh_ind[:, :n_neighbors] + # argpartition doesn't guarantee sorted order, so we sort again + neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])] + if return_distance: + if self.effective_metric_ == "euclidean": + result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind + else: + result = dist[sample_range, neigh_ind], neigh_ind + else: + result = neigh_ind + return result + + def kneighbors(self, X=None, n_neighbors=None, return_distance=True): + """Find the K-neighbors of a point. + + Returns indices of and distances to the neighbors of each point. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed', default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + + n_neighbors : int, default=None + Number of neighbors required for each sample. The default is the + value passed to the constructor. + + return_distance : bool, default=True + Whether or not to return the distances. + + Returns + ------- + neigh_dist : ndarray of shape (n_queries, n_neighbors) + Array representing the lengths to points, only present if + return_distance=True. + + neigh_ind : ndarray of shape (n_queries, n_neighbors) + Indices of the nearest points in the population matrix. + + Examples + -------- + In the following example, we construct a NearestNeighbors + class from an array representing our data set and ask who's + the closest point to [1,1,1] + + >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(n_neighbors=1) + >>> neigh.fit(samples) + NearestNeighbors(n_neighbors=1) + >>> print(neigh.kneighbors([[1., 1., 1.]])) + (array([[0.5]]), array([[2]])) + + As you can see, it returns [[0.5]], and [[2]], which means that the + element is at distance 0.5 and is the third element of samples + (indexes start at 0). You can also query for multiple points: + + >>> X = [[0., 1., 0.], [1., 0., 1.]] + >>> neigh.kneighbors(X, return_distance=False) + array([[1], + [2]]...) + """ + check_is_fitted(self) + + if n_neighbors is None: + n_neighbors = self.n_neighbors + elif n_neighbors <= 0: + raise ValueError("Expected n_neighbors > 0. 
Got %d" % n_neighbors) + elif not isinstance(n_neighbors, numbers.Integral): + raise TypeError( + "n_neighbors does not take %s value, enter integer value" + % type(n_neighbors) + ) + + query_is_train = X is None + if query_is_train: + X = self._fit_X + # Include an extra neighbor to account for the sample itself being + # returned, which is removed later + n_neighbors += 1 + else: + if self.metric == "precomputed": + X = _check_precomputed(X) + else: + X = self._validate_data(X, accept_sparse="csr", reset=False, order="C") + + n_samples_fit = self.n_samples_fit_ + if n_neighbors > n_samples_fit: + if query_is_train: + n_neighbors -= 1 # ok to modify inplace because an error is raised + inequality_str = "n_neighbors < n_samples_fit" + else: + inequality_str = "n_neighbors <= n_samples_fit" + raise ValueError( + f"Expected {inequality_str}, but " + f"n_neighbors = {n_neighbors}, n_samples_fit = {n_samples_fit}, " + f"n_samples = {X.shape[0]}" # include n_samples for common tests + ) + + n_jobs = effective_n_jobs(self.n_jobs) + chunked_results = None + use_pairwise_distances_reductions = ( + self._fit_method == "brute" + and ArgKmin.is_usable_for( + X if X is not None else self._fit_X, self._fit_X, self.effective_metric_ + ) + ) + if use_pairwise_distances_reductions: + results = ArgKmin.compute( + X=X, + Y=self._fit_X, + k=n_neighbors, + metric=self.effective_metric_, + metric_kwargs=self.effective_metric_params_, + strategy="auto", + return_distance=return_distance, + ) + + elif ( + self._fit_method == "brute" and self.metric == "precomputed" and issparse(X) + ): + results = _kneighbors_from_graph( + X, n_neighbors=n_neighbors, return_distance=return_distance + ) + + elif self._fit_method == "brute": + # Joblib-based backend, which is used when user-defined callable + # are passed for metric. + + # This won't be used in the future once PairwiseDistancesReductions + # support: + # - DistanceMetrics which work on supposedly binary data + # - CSR-dense and dense-CSR case if 'euclidean' in metric. + reduce_func = partial( + self._kneighbors_reduce_func, + n_neighbors=n_neighbors, + return_distance=return_distance, + ) + + # for efficiency, use squared euclidean distances + if self.effective_metric_ == "euclidean": + kwds = {"squared": True} + else: + kwds = self.effective_metric_params_ + + chunked_results = list( + pairwise_distances_chunked( + X, + self._fit_X, + reduce_func=reduce_func, + metric=self.effective_metric_, + n_jobs=n_jobs, + **kwds, + ) + ) + + elif self._fit_method in ["ball_tree", "kd_tree"]: + if issparse(X): + raise ValueError( + "%s does not work with sparse matrices. Densify the data, " + "or set algorithm='brute'" + % self._fit_method + ) + chunked_results = Parallel(n_jobs, prefer="threads")( + delayed(_tree_query_parallel_helper)( + self._tree, X[s], n_neighbors, return_distance + ) + for s in gen_even_slices(X.shape[0], n_jobs) + ) + else: + raise ValueError("internal: _fit_method not recognized") + + if chunked_results is not None: + if return_distance: + neigh_dist, neigh_ind = zip(*chunked_results) + results = np.vstack(neigh_dist), np.vstack(neigh_ind) + else: + results = np.vstack(chunked_results) + + if not query_is_train: + return results + else: + # If the query data is the same as the indexed data, we would like + # to ignore the first nearest neighbor of every sample, i.e + # the sample itself. 
+ if return_distance: + neigh_dist, neigh_ind = results + else: + neigh_ind = results + + n_queries, _ = X.shape + sample_range = np.arange(n_queries)[:, None] + sample_mask = neigh_ind != sample_range + + # Corner case: When the number of duplicates are more + # than the number of neighbors, the first NN will not + # be the sample, but a duplicate. + # In that case mask the first duplicate. + dup_gr_nbrs = np.all(sample_mask, axis=1) + sample_mask[:, 0][dup_gr_nbrs] = False + neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1)) + + if return_distance: + neigh_dist = np.reshape( + neigh_dist[sample_mask], (n_queries, n_neighbors - 1) + ) + return neigh_dist, neigh_ind + return neigh_ind + + def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"): + """Compute the (weighted) graph of k-Neighbors for points in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed', default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + For ``metric='precomputed'`` the shape should be + (n_queries, n_indexed). Otherwise the shape should be + (n_queries, n_features). + + n_neighbors : int, default=None + Number of neighbors for each sample. The default is the value + passed to the constructor. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the + connectivity matrix with ones and zeros, in 'distance' the + edges are distances between points, type of distance + depends on the selected metric parameter in + NearestNeighbors class. + + Returns + ------- + A : sparse-matrix of shape (n_queries, n_samples_fit) + `n_samples_fit` is the number of samples in the fitted data. + `A[i, j]` gives the weight of the edge connecting `i` to `j`. + The matrix is of CSR format. + + See Also + -------- + NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph + of Neighbors for points in X. + + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(n_neighbors=2) + >>> neigh.fit(X) + NearestNeighbors(n_neighbors=2) + >>> A = neigh.kneighbors_graph(X) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 1.], + [1., 0., 1.]]) + """ + check_is_fitted(self) + if n_neighbors is None: + n_neighbors = self.n_neighbors + + # check the input only in self.kneighbors + + # construct CSR matrix representation of the k-NN graph + if mode == "connectivity": + A_ind = self.kneighbors(X, n_neighbors, return_distance=False) + n_queries = A_ind.shape[0] + A_data = np.ones(n_queries * n_neighbors) + + elif mode == "distance": + A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True) + A_data = np.ravel(A_data) + + else: + raise ValueError( + 'Unsupported mode, must be one of "connectivity", ' + f'or "distance" but got "{mode}" instead' + ) + + n_queries = A_ind.shape[0] + n_samples_fit = self.n_samples_fit_ + n_nonzero = n_queries * n_neighbors + A_indptr = np.arange(0, n_nonzero + 1, n_neighbors) + + kneighbors_graph = csr_matrix( + (A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit) + ) + + return kneighbors_graph + + +def _tree_query_radius_parallel_helper(tree, *args, **kwargs): + """Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors. 
+ + The Cython method tree.query_radius is not directly picklable by + cloudpickle under PyPy. + """ + return tree.query_radius(*args, **kwargs) + + +class RadiusNeighborsMixin: + """Mixin for radius-based neighbors searches.""" + + def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance): + """Reduce a chunk of distances to the nearest neighbors. + + Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` + + Parameters + ---------- + dist : ndarray of shape (n_samples_chunk, n_samples) + The distance matrix. + + start : int + The index in X which the first row of dist corresponds to. + + radius : float + The radius considered when making the nearest neighbors search. + + return_distance : bool + Whether or not to return the distances. + + Returns + ------- + dist : list of ndarray of shape (n_samples_chunk,) + Returned only if `return_distance=True`. + + neigh : list of ndarray of shape (n_samples_chunk,) + The neighbors indices. + """ + neigh_ind = [np.where(d <= radius)[0] for d in dist] + + if return_distance: + if self.effective_metric_ == "euclidean": + dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)] + else: + dist = [d[neigh_ind[i]] for i, d in enumerate(dist)] + results = dist, neigh_ind + else: + results = neigh_ind + return results + + def radius_neighbors( + self, X=None, radius=None, return_distance=True, sort_results=False + ): + """Find the neighbors within a given radius of a point or points. + + Return the indices and distances of each point from the dataset + lying in a ball with size ``radius`` around the points of the query + array. Points lying on the boundary are included in the results. + + The result points are *not* necessarily sorted by distance to their + query point. + + Parameters + ---------- + X : {array-like, sparse matrix} of (n_samples, n_features), default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + + radius : float, default=None + Limiting distance of neighbors to return. The default is the value + passed to the constructor. + + return_distance : bool, default=True + Whether or not to return the distances. + + sort_results : bool, default=False + If True, the distances and indices will be sorted by increasing + distances before being returned. If False, the results may not + be sorted. If `return_distance=False`, setting `sort_results=True` + will result in an error. + + .. versionadded:: 0.22 + + Returns + ------- + neigh_dist : ndarray of shape (n_samples,) of arrays + Array representing the distances to each point, only present if + `return_distance=True`. The distance values are computed according + to the ``metric`` constructor parameter. + + neigh_ind : ndarray of shape (n_samples,) of arrays + An array of arrays of indices of the approximate nearest points + from the population matrix that lie within a ball of size + ``radius`` around the query points. + + Notes + ----- + Because the number of neighbors of each point is not necessarily + equal, the results for multiple query points cannot be fit in a + standard data array. + For efficiency, `radius_neighbors` returns arrays of objects, where + each object is a 1D array of indices or distances. 
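+ + For instance (an illustrative sketch, assuming the three fitted 1-D + points below; not part of the section that follows), each query may + yield a different number of neighbors, hence the ragged object arrays: + + >>> from sklearn.neighbors import NearestNeighbors + >>> nn = NearestNeighbors(radius=1.0).fit([[0.], [1.], [5.]]) + >>> dist, ind = nn.radius_neighbors([[0.5], [5.]]) + >>> [len(i) for i in ind] + [2, 1]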
+ + Examples + -------- + In the following example, we construct a NeighborsClassifier + class from an array representing our data set and ask who's + the closest point to [1, 1, 1]: + + >>> import numpy as np + >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(radius=1.6) + >>> neigh.fit(samples) + NearestNeighbors(radius=1.6) + >>> rng = neigh.radius_neighbors([[1., 1., 1.]]) + >>> print(np.asarray(rng[0][0])) + [1.5 0.5] + >>> print(np.asarray(rng[1][0])) + [1 2] + + The first array returned contains the distances to all points which + are closer than 1.6, while the second array returned contains their + indices. In general, multiple points can be queried at the same time. + """ + check_is_fitted(self) + + if sort_results and not return_distance: + raise ValueError("return_distance must be True if sort_results is True.") + + query_is_train = X is None + if query_is_train: + X = self._fit_X + else: + if self.metric == "precomputed": + X = _check_precomputed(X) + else: + X = self._validate_data(X, accept_sparse="csr", reset=False, order="C") + + if radius is None: + radius = self.radius + + use_pairwise_distances_reductions = ( + self._fit_method == "brute" + and RadiusNeighbors.is_usable_for( + X if X is not None else self._fit_X, self._fit_X, self.effective_metric_ + ) + ) + + if use_pairwise_distances_reductions: + results = RadiusNeighbors.compute( + X=X, + Y=self._fit_X, + radius=radius, + metric=self.effective_metric_, + metric_kwargs=self.effective_metric_params_, + strategy="auto", + return_distance=return_distance, + sort_results=sort_results, + ) + + elif ( + self._fit_method == "brute" and self.metric == "precomputed" and issparse(X) + ): + results = _radius_neighbors_from_graph( + X, radius=radius, return_distance=return_distance + ) + + elif self._fit_method == "brute": + # Joblib-based backend, which is used when user-defined callable + # are passed for metric. + + # This won't be used in the future once PairwiseDistancesReductions + # support: + # - DistanceMetrics which work on supposedly binary data + # - CSR-dense and dense-CSR case if 'euclidean' in metric. + + # for efficiency, use squared euclidean distances + if self.effective_metric_ == "euclidean": + radius *= radius + kwds = {"squared": True} + else: + kwds = self.effective_metric_params_ + + reduce_func = partial( + self._radius_neighbors_reduce_func, + radius=radius, + return_distance=return_distance, + ) + + chunked_results = pairwise_distances_chunked( + X, + self._fit_X, + reduce_func=reduce_func, + metric=self.effective_metric_, + n_jobs=self.n_jobs, + **kwds, + ) + if return_distance: + neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results) + neigh_dist_list = sum(neigh_dist_chunks, []) + neigh_ind_list = sum(neigh_ind_chunks, []) + neigh_dist = _to_object_array(neigh_dist_list) + neigh_ind = _to_object_array(neigh_ind_list) + results = neigh_dist, neigh_ind + else: + neigh_ind_list = sum(chunked_results, []) + results = _to_object_array(neigh_ind_list) + + if sort_results: + for ii in range(len(neigh_dist)): + order = np.argsort(neigh_dist[ii], kind="mergesort") + neigh_ind[ii] = neigh_ind[ii][order] + neigh_dist[ii] = neigh_dist[ii][order] + results = neigh_dist, neigh_ind + + elif self._fit_method in ["ball_tree", "kd_tree"]: + if issparse(X): + raise ValueError( + "%s does not work with sparse matrices. 
Densify the data, " + "or set algorithm='brute'" + % self._fit_method + ) + + n_jobs = effective_n_jobs(self.n_jobs) + delayed_query = delayed(_tree_query_radius_parallel_helper) + chunked_results = Parallel(n_jobs, prefer="threads")( + delayed_query( + self._tree, X[s], radius, return_distance, sort_results=sort_results + ) + for s in gen_even_slices(X.shape[0], n_jobs) + ) + if return_distance: + neigh_ind, neigh_dist = tuple(zip(*chunked_results)) + results = np.hstack(neigh_dist), np.hstack(neigh_ind) + else: + results = np.hstack(chunked_results) + else: + raise ValueError("internal: _fit_method not recognized") + + if not query_is_train: + return results + else: + # If the query data is the same as the indexed data, we would like + # to ignore the first nearest neighbor of every sample, i.e + # the sample itself. + if return_distance: + neigh_dist, neigh_ind = results + else: + neigh_ind = results + + for ind, ind_neighbor in enumerate(neigh_ind): + mask = ind_neighbor != ind + + neigh_ind[ind] = ind_neighbor[mask] + if return_distance: + neigh_dist[ind] = neigh_dist[ind][mask] + + if return_distance: + return neigh_dist, neigh_ind + return neigh_ind + + def radius_neighbors_graph( + self, X=None, radius=None, mode="connectivity", sort_results=False + ): + """Compute the (weighted) graph of Neighbors for points in X. + + Neighborhoods are restricted the points at a distance lower than + radius. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None + The query point or points. + If not provided, neighbors of each indexed point are returned. + In this case, the query point is not considered its own neighbor. + + radius : float, default=None + Radius of neighborhoods. The default is the value passed to the + constructor. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the + connectivity matrix with ones and zeros, in 'distance' the + edges are distances between points, type of distance + depends on the selected metric parameter in + NearestNeighbors class. + + sort_results : bool, default=False + If True, in each row of the result, the non-zero entries will be + sorted by increasing distances. If False, the non-zero entries may + not be sorted. Only used with mode='distance'. + + .. versionadded:: 0.22 + + Returns + ------- + A : sparse-matrix of shape (n_queries, n_samples_fit) + `n_samples_fit` is the number of samples in the fitted data. + `A[i, j]` gives the weight of the edge connecting `i` to `j`. + The matrix is of CSR format. + + See Also + -------- + kneighbors_graph : Compute the (weighted) graph of k-Neighbors for + points in X. 
+ + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import NearestNeighbors + >>> neigh = NearestNeighbors(radius=1.5) + >>> neigh.fit(X) + NearestNeighbors(radius=1.5) + >>> A = neigh.radius_neighbors_graph(X) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 0.], + [1., 0., 1.]]) + """ + check_is_fitted(self) + + # check the input only in self.radius_neighbors + + if radius is None: + radius = self.radius + + # construct CSR matrix representation of the NN graph + if mode == "connectivity": + A_ind = self.radius_neighbors(X, radius, return_distance=False) + A_data = None + elif mode == "distance": + dist, A_ind = self.radius_neighbors( + X, radius, return_distance=True, sort_results=sort_results + ) + A_data = np.concatenate(list(dist)) + else: + raise ValueError( + 'Unsupported mode, must be one of "connectivity", ' + f'or "distance" but got "{mode}" instead' + ) + + n_queries = A_ind.shape[0] + n_samples_fit = self.n_samples_fit_ + n_neighbors = np.array([len(a) for a in A_ind]) + A_ind = np.concatenate(list(A_ind)) + if A_data is None: + A_data = np.ones(len(A_ind)) + A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors))) + + return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit)) diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_classification.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..26ffa273d0a60b68f51eb4edff0e83e188cb6d5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_classification.py @@ -0,0 +1,839 @@ +"""Nearest Neighbor Classification""" + +# Authors: Jake Vanderplas +# Fabian Pedregosa +# Alexandre Gramfort +# Sparseness support by Lars Buitinck +# Multi-output support by Arnaud Joly +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam +import warnings +from numbers import Integral + +import numpy as np + +from sklearn.neighbors._base import _check_precomputed + +from ..base import ClassifierMixin, _fit_context +from ..metrics._pairwise_distances_reduction import ( + ArgKminClassMode, + RadiusNeighborsClassMode, +) +from ..utils._param_validation import StrOptions +from ..utils.arrayfuncs import _all_with_any_reduction_axis_1 +from ..utils.extmath import weighted_mode +from ..utils.fixes import _mode +from ..utils.validation import _is_arraylike, _num_samples, check_is_fitted +from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights + + +def _adjusted_metric(metric, metric_kwargs, p=None): + metric_kwargs = metric_kwargs or {} + if metric == "minkowski": + metric_kwargs["p"] = p + if p == 2: + metric = "euclidean" + return metric, metric_kwargs + + +class KNeighborsClassifier(KNeighborsMixin, ClassifierMixin, NeighborsBase): + """Classifier implementing the k-nearest neighbors vote. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to use by default for :meth:`kneighbors` queries. + + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. 
- [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + Refer to the example entitled + :ref:`sphx_glr_auto_examples_neighbors_plot_classification.py` + showing the impact of the `weights` parameter on the decision + boundary. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is equivalent + to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. + For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected + to be positive. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary <n_jobs>` + for more details. + Doesn't affect :meth:`fit` method. + + Attributes + ---------- + classes_ : array of shape (n_classes,) + Class labels known to the classifier. + + effective_metric_ : str or callable + The distance metric used. It will be the same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to + 'minkowski' and the `p` parameter to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + it will be the same as the `metric_params` parameter, but it may also + contain the `p` parameter value if the `effective_metric_` attribute is + set to 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data.
+ + outputs_2d_ : bool + False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit + otherwise True. + + See Also + -------- + RadiusNeighborsClassifier: Classifier based on neighbors within a fixed radius. + KNeighborsRegressor: Regression based on k-nearest neighbors. + RadiusNeighborsRegressor: Regression based on neighbors within a fixed radius. + NearestNeighbors: Unsupervised learner for implementing neighbor searches. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + .. warning:: + + Regarding the Nearest Neighbors algorithms, if it is found that two + neighbors, neighbor `k+1` and `k`, have identical distances + but different labels, the results will depend on the ordering of the + training data. + + https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import KNeighborsClassifier + >>> neigh = KNeighborsClassifier(n_neighbors=3) + >>> neigh.fit(X, y) + KNeighborsClassifier(...) + >>> print(neigh.predict([[1.1]])) + [0] + >>> print(neigh.predict_proba([[0.9]])) + [[0.666... 0.333...]] + """ + + _parameter_constraints: dict = {**NeighborsBase._parameter_constraints} + _parameter_constraints.pop("radius") + _parameter_constraints.update( + {"weights": [StrOptions({"uniform", "distance"}), callable, None]} + ) + + def __init__( + self, + n_neighbors=5, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + metric_params=None, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + + @_fit_context( + # KNeighborsClassifier.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the k-nearest neighbors classifier from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : KNeighborsClassifier + The fitted k-nearest neighbors classifier. + """ + return self._fit(X, y) + + def predict(self, X): + """Predict the class labels for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs) + Class labels for each data sample. + """ + check_is_fitted(self, "_fit_method") + if self.weights == "uniform": + if self._fit_method == "brute" and ArgKminClassMode.is_usable_for( + X, self._fit_X, self.metric + ): + probabilities = self.predict_proba(X) + if self.outputs_2d_: + return np.stack( + [ + self.classes_[idx][np.argmax(probas, axis=1)] + for idx, probas in enumerate(probabilities) + ], + axis=1, + ) + return self.classes_[np.argmax(probabilities, axis=1)] + # In that case, we do not need the distances to perform + # the weighting so we do not compute them. 
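+ # Illustrative sketch of this uniform-weights path (shapes stated for + # orientation, not enforced here): kneighbors gives neigh_ind of shape + # (n_queries, n_neighbors); _mode over the encoded labels + # _y[neigh_ind, k] further below picks the majority class per query row.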
+ neigh_ind = self.kneighbors(X, return_distance=False) + neigh_dist = None + else: + neigh_dist, neigh_ind = self.kneighbors(X) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + n_outputs = len(classes_) + n_queries = _num_samples(X) + weights = _get_weights(neigh_dist, self.weights) + if weights is not None and _all_with_any_reduction_axis_1(weights, value=0): + raise ValueError( + "All neighbors of some sample are getting zero weights. " + "Please modify 'weights' to avoid this case if you are " + "using a user-defined function." + ) + + y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype) + for k, classes_k in enumerate(classes_): + if weights is None: + mode, _ = _mode(_y[neigh_ind, k], axis=1) + else: + mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1) + + mode = np.asarray(mode.ravel(), dtype=np.intp) + y_pred[:, k] = classes_k.take(mode) + + if not self.outputs_2d_: + y_pred = y_pred.ravel() + + return y_pred + + def predict_proba(self, X): + """Return probability estimates for the test data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \ + of such arrays if n_outputs > 1. + The class probabilities of the input samples. Classes are ordered + by lexicographic order. + """ + check_is_fitted(self, "_fit_method") + if self.weights == "uniform": + # TODO: systematize this mapping of metric for + # PairwiseDistancesReductions. + metric, metric_kwargs = _adjusted_metric( + metric=self.metric, metric_kwargs=self.metric_params, p=self.p + ) + if ( + self._fit_method == "brute" + and ArgKminClassMode.is_usable_for(X, self._fit_X, metric) + # TODO: Implement efficient multi-output solution + and not self.outputs_2d_ + ): + if self.metric == "precomputed": + X = _check_precomputed(X) + else: + X = self._validate_data( + X, accept_sparse="csr", reset=False, order="C" + ) + + probabilities = ArgKminClassMode.compute( + X, + self._fit_X, + k=self.n_neighbors, + weights=self.weights, + Y_labels=self._y, + unique_Y_labels=self.classes_, + metric=metric, + metric_kwargs=metric_kwargs, + # `strategy="parallel_on_X"` has in practice been shown + # to be more efficient than `strategy="parallel_on_Y"` + # on many combinations of datasets. + # Hence, we choose to enforce it here. + # For more information, see: + # https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342 # noqa + # TODO: adapt the heuristic for `strategy="auto"` for + # `ArgKminClassMode` and use `strategy="auto"`. + strategy="parallel_on_X", + ) + return probabilities + + # In that case, we do not need the distances to perform + # the weighting so we do not compute them. + neigh_ind = self.kneighbors(X, return_distance=False) + neigh_dist = None + else: + neigh_dist, neigh_ind = self.kneighbors(X) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + n_queries = _num_samples(X) + + weights = _get_weights(neigh_dist, self.weights) + if weights is None: + weights = np.ones_like(neigh_ind) + elif _all_with_any_reduction_axis_1(weights, value=0): + raise ValueError( + "All neighbors of some sample are getting zero weights. " + "Please modify 'weights' to avoid this case if you are " + "using a user-defined function."
+ ) + + all_rows = np.arange(n_queries) + probabilities = [] + for k, classes_k in enumerate(classes_): + pred_labels = _y[:, k][neigh_ind] + proba_k = np.zeros((n_queries, classes_k.size)) + + # a simple ':' index doesn't work right + for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors) + proba_k[all_rows, idx] += weights[:, i] + + # normalize 'votes' into real [0,1] probabilities + normalizer = proba_k.sum(axis=1)[:, np.newaxis] + proba_k /= normalizer + + probabilities.append(proba_k) + + if not self.outputs_2d_: + probabilities = probabilities[0] + + return probabilities + + def _more_tags(self): + return {"multilabel": True} + + +class RadiusNeighborsClassifier(RadiusNeighborsMixin, ClassifierMixin, NeighborsBase): + """Classifier implementing a vote among neighbors within a given radius. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + radius : float, default=1.0 + Range of parameter space to use by default for :meth:`radius_neighbors` + queries. + + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. + - [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + Uniform weights are used by default. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + outlier_label : {manual label, 'most_frequent'}, default=None + Label for outlier samples (samples with no neighbors in given radius). 
+ + - manual label: str or int label (should be the same type as y) + or list of manual labels if multi-output is used. + - 'most_frequent' : assign the most frequent label of y to outliers. + - None : when any outlier is detected, ValueError will be raised. + + The outlier label should be selected from among the unique 'Y' labels. + If it is specified with a different value a warning will be raised and + all class probabilities of outliers will be assigned to be 0. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier. + + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + outlier_label_ : int or array-like of shape (n_class,) + Label which is given for outlier samples (samples with no neighbors + on given radius). + + outputs_2d_ : bool + False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit + otherwise True. + + See Also + -------- + KNeighborsClassifier : Classifier implementing the k-nearest neighbors + vote. + RadiusNeighborsRegressor : Regression based on neighbors within a + fixed radius. + KNeighborsRegressor : Regression based on k-nearest neighbors. + NearestNeighbors : Unsupervised learner for implementing neighbor + searches. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import RadiusNeighborsClassifier + >>> neigh = RadiusNeighborsClassifier(radius=1.0) + >>> neigh.fit(X, y) + RadiusNeighborsClassifier(...) 
+ >>> print(neigh.predict([[1.5]])) + [0] + >>> print(neigh.predict_proba([[1.0]])) + [[0.66666667 0.33333333]] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "weights": [StrOptions({"uniform", "distance"}), callable, None], + "outlier_label": [Integral, str, "array-like", None], + } + _parameter_constraints.pop("n_neighbors") + + def __init__( + self, + radius=1.0, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + outlier_label=None, + metric_params=None, + n_jobs=None, + ): + super().__init__( + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + self.outlier_label = outlier_label + + @_fit_context( + # RadiusNeighborsClassifier.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the radius neighbors classifier from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : RadiusNeighborsClassifier + The fitted radius neighbors classifier. + """ + self._fit(X, y) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + if self.outlier_label is None: + outlier_label_ = None + + elif self.outlier_label == "most_frequent": + outlier_label_ = [] + # iterate over multi-output, get the most frequent label for each + # output. + for k, classes_k in enumerate(classes_): + label_count = np.bincount(_y[:, k]) + outlier_label_.append(classes_k[label_count.argmax()]) + + else: + if _is_arraylike(self.outlier_label) and not isinstance( + self.outlier_label, str + ): + if len(self.outlier_label) != len(classes_): + raise ValueError( + "The length of outlier_label: {} is " + "inconsistent with the output " + "length: {}".format(self.outlier_label, len(classes_)) + ) + outlier_label_ = self.outlier_label + else: + outlier_label_ = [self.outlier_label] * len(classes_) + + for classes, label in zip(classes_, outlier_label_): + if _is_arraylike(label) and not isinstance(label, str): + # ensure the outlier label for each output is a scalar. + raise TypeError( + "The outlier_label of classes {} is " + "supposed to be a scalar, got " + "{}.".format(classes, label) + ) + if np.append(classes, label).dtype != classes.dtype: + # ensure the dtype of outlier label is consistent with y. + raise TypeError( + "The dtype of outlier_label {} is " + "inconsistent with classes {} in " + "y.".format(label, classes) + ) + + self.outlier_label_ = outlier_label_ + + return self + + def predict(self, X): + """Predict the class labels for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs) + Class labels for each data sample. 
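        Examples
        --------
        A minimal sketch of outlier handling (editor's addition; the toy data
        and the `outlier_label` value are illustrative, not from the original
        docstring):

        >>> from sklearn.neighbors import RadiusNeighborsClassifier
        >>> clf = RadiusNeighborsClassifier(radius=1.0, outlier_label='most_frequent')
        >>> clf.fit([[0], [1], [5]], [0, 0, 1])
        RadiusNeighborsClassifier(...)
        >>> clf.predict([[0.5], [10.0]])  # [10.0] has no neighbors within the radius
        array([0, 0])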
+ """ + + probs = self.predict_proba(X) + classes_ = self.classes_ + + if not self.outputs_2d_: + probs = [probs] + classes_ = [self.classes_] + + n_outputs = len(classes_) + n_queries = probs[0].shape[0] + y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype) + + for k, prob in enumerate(probs): + # iterate over multi-output, assign labels based on probabilities + # of each output. + max_prob_index = prob.argmax(axis=1) + y_pred[:, k] = classes_[k].take(max_prob_index) + + outlier_zero_probs = (prob == 0).all(axis=1) + if outlier_zero_probs.any(): + zero_prob_index = np.flatnonzero(outlier_zero_probs) + y_pred[zero_prob_index, k] = self.outlier_label_[k] + + if not self.outputs_2d_: + y_pred = y_pred.ravel() + + return y_pred + + def predict_proba(self, X): + """Return probability estimates for the test data X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + p : ndarray of shape (n_queries, n_classes), or a list of \ + n_outputs of such arrays if n_outputs > 1. + The class probabilities of the input samples. Classes are ordered + by lexicographic order. + """ + check_is_fitted(self, "_fit_method") + n_queries = _num_samples(X) + + metric, metric_kwargs = _adjusted_metric( + metric=self.metric, metric_kwargs=self.metric_params, p=self.p + ) + + if ( + self.weights == "uniform" + and self._fit_method == "brute" + and not self.outputs_2d_ + and RadiusNeighborsClassMode.is_usable_for(X, self._fit_X, metric) + ): + probabilities = RadiusNeighborsClassMode.compute( + X=X, + Y=self._fit_X, + radius=self.radius, + weights=self.weights, + Y_labels=self._y, + unique_Y_labels=self.classes_, + outlier_label=self.outlier_label, + metric=metric, + metric_kwargs=metric_kwargs, + strategy="parallel_on_X", + # `strategy="parallel_on_X"` has in practice be shown + # to be more efficient than `strategy="parallel_on_Y`` + # on many combination of datasets. + # Hence, we choose to enforce it here. + # For more information, see: + # https://github.com/scikit-learn/scikit-learn/pull/26828/files#r1282398471 # noqa + ) + return probabilities + + neigh_dist, neigh_ind = self.radius_neighbors(X) + outlier_mask = np.zeros(n_queries, dtype=bool) + outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind] + outliers = np.flatnonzero(outlier_mask) + inliers = np.flatnonzero(~outlier_mask) + + classes_ = self.classes_ + _y = self._y + if not self.outputs_2d_: + _y = self._y.reshape((-1, 1)) + classes_ = [self.classes_] + + if self.outlier_label_ is None and outliers.size > 0: + raise ValueError( + "No neighbors found for test samples %r, " + "you can try using larger radius, " + "giving a label for outliers, " + "or considering removing them from your dataset." % outliers + ) + + weights = _get_weights(neigh_dist, self.weights) + if weights is not None: + weights = weights[inliers] + + probabilities = [] + # iterate over multi-output, measure probabilities of the k-th output. 
+ for k, classes_k in enumerate(classes_): + pred_labels = np.zeros(len(neigh_ind), dtype=object) + pred_labels[:] = [_y[ind, k] for ind in neigh_ind] + + proba_k = np.zeros((n_queries, classes_k.size)) + proba_inl = np.zeros((len(inliers), classes_k.size)) + + # samples have different size of neighbors within the same radius + if weights is None: + for i, idx in enumerate(pred_labels[inliers]): + proba_inl[i, :] = np.bincount(idx, minlength=classes_k.size) + else: + for i, idx in enumerate(pred_labels[inliers]): + proba_inl[i, :] = np.bincount( + idx, weights[i], minlength=classes_k.size + ) + proba_k[inliers, :] = proba_inl + + if outliers.size > 0: + _outlier_label = self.outlier_label_[k] + label_index = np.flatnonzero(classes_k == _outlier_label) + if label_index.size == 1: + proba_k[outliers, label_index[0]] = 1.0 + else: + warnings.warn( + "Outlier label {} is not in training " + "classes. All class probabilities of " + "outliers will be assigned with 0." + "".format(self.outlier_label_[k]) + ) + + # normalize 'votes' into real [0,1] probabilities + normalizer = proba_k.sum(axis=1)[:, np.newaxis] + normalizer[normalizer == 0.0] = 1.0 + proba_k /= normalizer + + probabilities.append(proba_k) + + if not self.outputs_2d_: + probabilities = probabilities[0] + + return probabilities + + def _more_tags(self): + return {"multilabel": True} diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_graph.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff27d07514e05d4d6edc2fb5c9a8461ed4defd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_graph.py @@ -0,0 +1,719 @@ +"""Nearest Neighbors graph functions""" + +# Author: Jake Vanderplas +# Tom Dupre la Tour +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam +import itertools + +from ..base import ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context +from ..utils._param_validation import ( + Integral, + Interval, + Real, + StrOptions, + validate_params, +) +from ..utils.validation import check_is_fitted +from ._base import VALID_METRICS, KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin +from ._unsupervised import NearestNeighbors + + +def _check_params(X, metric, p, metric_params): + """Check the validity of the input parameters""" + params = zip(["metric", "p", "metric_params"], [metric, p, metric_params]) + est_params = X.get_params() + for param_name, func_param in params: + if func_param != est_params[param_name]: + raise ValueError( + "Got %s for %s, while the estimator has %s for the same parameter." 
+ % (func_param, param_name, est_params[param_name]) + ) + + +def _query_include_self(X, include_self, mode): + """Return the query based on include_self param""" + if include_self == "auto": + include_self = mode == "connectivity" + + # it does not include each sample as its own neighbors + if not include_self: + X = None + + return X + + +@validate_params( + { + "X": ["array-like", KNeighborsMixin], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "mode": [StrOptions({"connectivity", "distance"})], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "include_self": ["boolean", StrOptions({"auto"})], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def kneighbors_graph( + X, + n_neighbors, + *, + mode="connectivity", + metric="minkowski", + p=2, + metric_params=None, + include_self=False, + n_jobs=None, +): + """Compute the (weighted) graph of k-Neighbors for points in X. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Sample data. + + n_neighbors : int + Number of neighbors for each sample. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + metric : str, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is equivalent + to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. + For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected + to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + include_self : bool or 'auto', default=False + Whether or not to mark each sample as the first nearest neighbor to + itself. If 'auto', then True is used for mode='connectivity' and False + for mode='distance'. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix of shape (n_samples, n_samples) + Graph where A[i, j] is assigned the weight of edge that + connects i to j. The matrix is of CSR format. + + See Also + -------- + radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X. 
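    Notes
    -----
    Editor's note (assumption, inferred from the implementation): `X` may
    also be a fitted :class:`~sklearn.neighbors.NearestNeighbors`-like
    estimator, in which case its fitted data is reused and `metric`, `p` and
    `metric_params` must match the estimator's own parameters:

    >>> from sklearn.neighbors import NearestNeighbors, kneighbors_graph
    >>> nn = NearestNeighbors(n_neighbors=2).fit([[0], [3], [1]])
    >>> kneighbors_graph(nn, 2, mode='connectivity', include_self=True).toarray()
    array([[1., 0., 1.],
           [0., 1., 1.],
           [1., 0., 1.]])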
+ + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import kneighbors_graph + >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 1.], + [1., 0., 1.]]) + """ + if not isinstance(X, KNeighborsMixin): + X = NearestNeighbors( + n_neighbors=n_neighbors, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ).fit(X) + else: + _check_params(X, metric, p, metric_params) + + query = _query_include_self(X._fit_X, include_self, mode) + return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode) + + +@validate_params( + { + "X": ["array-like", RadiusNeighborsMixin], + "radius": [Interval(Real, 0, None, closed="both")], + "mode": [StrOptions({"connectivity", "distance"})], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "include_self": ["boolean", StrOptions({"auto"})], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def radius_neighbors_graph( + X, + radius, + *, + mode="connectivity", + metric="minkowski", + p=2, + metric_params=None, + include_self=False, + n_jobs=None, +): + """Compute the (weighted) graph of Neighbors for points in X. + + Neighborhoods are restricted to points at a distance lower than the + radius. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Sample data. + + radius : float + Radius of neighborhoods. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + metric : str, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + include_self : bool or 'auto', default=False + Whether or not to mark each sample as the first nearest neighbor to + itself. If 'auto', then True is used for mode='connectivity' and False + for mode='distance'. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix of shape (n_samples, n_samples) + Graph where A[i, j] is assigned the weight of edge that connects + i to j. The matrix is of CSR format. + + See Also + -------- + kneighbors_graph: Compute the weighted graph of k-neighbors for points in X. + + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import radius_neighbors_graph + >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', + ...
include_self=True) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 0.], + [1., 0., 1.]]) + """ + if not isinstance(X, RadiusNeighborsMixin): + X = NearestNeighbors( + radius=radius, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ).fit(X) + else: + _check_params(X, metric, p, metric_params) + + query = _query_include_self(X._fit_X, include_self, mode) + return X.radius_neighbors_graph(query, radius, mode) + + +class KNeighborsTransformer( + ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase +): + """Transform X into a (weighted) graph of k nearest neighbors. + + The transformed data is a sparse graph as returned by kneighbors_graph. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + mode : {'distance', 'connectivity'}, default='distance' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + n_neighbors : int, default=5 + Number of neighbors for each sample in the transformed sparse graph. + For compatibility reasons, as each sample is considered as its own + neighbor, one extra neighbor will be computed when mode == 'distance'. + In this case, the sparse graph contains (n_neighbors + 1) neighbors. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + If ``-1``, then the number of jobs is set to the number of CPU cores. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. 
+ + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + kneighbors_graph : Compute the weighted graph of k-neighbors for + points in X. + RadiusNeighborsTransformer : Transform X into a weighted graph of + neighbors nearer than a radius. + + Notes + ----- + For an example of using :class:`~sklearn.neighbors.KNeighborsTransformer` + in combination with :class:`~sklearn.manifold.TSNE` see + :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`. + + Examples + -------- + >>> from sklearn.datasets import load_wine + >>> from sklearn.neighbors import KNeighborsTransformer + >>> X, _ = load_wine(return_X_y=True) + >>> X.shape + (178, 13) + >>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance') + >>> X_dist_graph = transformer.fit_transform(X) + >>> X_dist_graph.shape + (178, 178) + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "mode": [StrOptions({"distance", "connectivity"})], + } + _parameter_constraints.pop("radius") + + def __init__( + self, + *, + mode="distance", + n_neighbors=5, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super(KNeighborsTransformer, self).__init__( + n_neighbors=n_neighbors, + radius=None, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.mode = mode + + @_fit_context( + # KNeighborsTransformer.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the k-nearest neighbors transformer from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : KNeighborsTransformer + The fitted k-nearest neighbors transformer. + """ + self._fit(X) + self._n_features_out = self.n_samples_fit_ + return self + + def transform(self, X): + """Compute the (weighted) graph of Neighbors for points in X. + + Parameters + ---------- + X : array-like of shape (n_samples_transform, n_features) + Sample data. + + Returns + ------- + Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + check_is_fitted(self) + add_one = self.mode == "distance" + return self.kneighbors_graph( + X, mode=self.mode, n_neighbors=self.n_neighbors + add_one + ) + + def fit_transform(self, X, y=None): + """Fit to data, then transform it. + + Fits transformer to X and y with optional parameters fit_params + and returns a transformed version of X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. 
+ + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : sparse matrix of shape (n_samples, n_samples) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + return self.fit(X).transform(X) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_methods_sample_order_invariance": "check is not applicable." + } + } + + +class RadiusNeighborsTransformer( + ClassNamePrefixFeaturesOutMixin, + RadiusNeighborsMixin, + TransformerMixin, + NeighborsBase, +): + """Transform X into a (weighted) graph of neighbors nearer than a radius. + + The transformed data is a sparse graph as returned by + `radius_neighbors_graph`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + mode : {'distance', 'connectivity'}, default='distance' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + radius : float, default=1.0 + Radius of neighborhood in the transformed sparse graph. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + If ``-1``, then the number of jobs is set to the number of CPU cores. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. 
For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + kneighbors_graph : Compute the weighted graph of k-neighbors for + points in X. + KNeighborsTransformer : Transform X into a weighted graph of k + nearest neighbors. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import load_wine + >>> from sklearn.cluster import DBSCAN + >>> from sklearn.neighbors import RadiusNeighborsTransformer + >>> from sklearn.pipeline import make_pipeline + >>> X, _ = load_wine(return_X_y=True) + >>> estimator = make_pipeline( + ... RadiusNeighborsTransformer(radius=42.0, mode='distance'), + ... DBSCAN(eps=25.0, metric='precomputed')) + >>> X_clustered = estimator.fit_predict(X) + >>> clusters, counts = np.unique(X_clustered, return_counts=True) + >>> print(counts) + [ 29 15 111 11 12] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "mode": [StrOptions({"distance", "connectivity"})], + } + _parameter_constraints.pop("n_neighbors") + + def __init__( + self, + *, + mode="distance", + radius=1.0, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super(RadiusNeighborsTransformer, self).__init__( + n_neighbors=None, + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.mode = mode + + @_fit_context( + # RadiusNeighborsTransformer.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the radius neighbors transformer from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : RadiusNeighborsTransformer + The fitted radius neighbors transformer. + """ + self._fit(X) + self._n_features_out = self.n_samples_fit_ + return self + + def transform(self, X): + """Compute the (weighted) graph of Neighbors for points in X. + + Parameters + ---------- + X : array-like of shape (n_samples_transform, n_features) + Sample data. + + Returns + ------- + Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + check_is_fitted(self) + return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True) + + def fit_transform(self, X, y=None): + """Fit to data, then transform it. + + Fits transformer to X and y with optional parameters fit_params + and returns a transformed version of X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present for API consistency by convention. 
+ + Returns + ------- + Xt : sparse matrix of shape (n_samples, n_samples) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + return self.fit(X).transform(X) + + def _more_tags(self): + return { + "_xfail_checks": { + "check_methods_sample_order_invariance": "check is not applicable." + } + } diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_nca.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_nca.py new file mode 100644 index 0000000000000000000000000000000000000000..d302aef0dc0a286a10c9fbbd99a415cf5cf1ccc7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_nca.py @@ -0,0 +1,525 @@ +""" +Neighborhood Component Analysis +""" + +# Authors: William de Vazelhes +# John Chiotellis +# License: BSD 3 clause + +import sys +import time +from numbers import Integral, Real +from warnings import warn + +import numpy as np +from scipy.optimize import minimize + +from ..base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from ..decomposition import PCA +from ..exceptions import ConvergenceWarning +from ..metrics import pairwise_distances +from ..preprocessing import LabelEncoder +from ..utils._param_validation import Interval, StrOptions +from ..utils.extmath import softmax +from ..utils.multiclass import check_classification_targets +from ..utils.random import check_random_state +from ..utils.validation import check_array, check_is_fitted + + +class NeighborhoodComponentsAnalysis( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Neighborhood Components Analysis. + + Neighborhood Component Analysis (NCA) is a machine learning algorithm for + metric learning. It learns a linear transformation in a supervised fashion + to improve the classification accuracy of a stochastic nearest neighbors + rule in the transformed space. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=None + Preferred dimensionality of the projected space. + If None it will be set to `n_features`. + + init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \ + (n_features_a, n_features_b), default='auto' + Initialization of the linear transformation. Possible options are + `'auto'`, `'pca'`, `'lda'`, `'identity'`, `'random'`, and a numpy + array of shape `(n_features_a, n_features_b)`. + + - `'auto'` + Depending on `n_components`, the most reasonable initialization + will be chosen. If `n_components <= n_classes` we use `'lda'`, as + it uses labels information. If not, but + `n_components < min(n_features, n_samples)`, we use `'pca'`, as + it projects data in meaningful directions (those of higher + variance). Otherwise, we just use `'identity'`. + + - `'pca'` + `n_components` principal components of the inputs passed + to :meth:`fit` will be used to initialize the transformation. + (See :class:`~sklearn.decomposition.PCA`) + + - `'lda'` + `min(n_components, n_classes)` most discriminative + components of the inputs passed to :meth:`fit` will be used to + initialize the transformation. (If `n_components > n_classes`, + the rest of the components will be zero.) 
(See + :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`) + + - `'identity'` + If `n_components` is strictly smaller than the + dimensionality of the inputs passed to :meth:`fit`, the identity + matrix will be truncated to the first `n_components` rows. + + - `'random'` + The initial transformation will be a random array of shape + `(n_components, n_features)`. Each value is sampled from the + standard normal distribution. + + - numpy array + `n_features_b` must match the dimensionality of the inputs passed + to :meth:`fit` and n_features_a must be less than or equal to that. + If `n_components` is not `None`, `n_features_a` must match it. + + warm_start : bool, default=False + If `True` and :meth:`fit` has been called before, the solution of the + previous call to :meth:`fit` is used as the initial linear + transformation (`n_components` and `init` will be ignored). + + max_iter : int, default=50 + Maximum number of iterations in the optimization. + + tol : float, default=1e-5 + Convergence tolerance for the optimization. + + callback : callable, default=None + If not `None`, this function is called after every iteration of the + optimizer, taking as arguments the current solution (flattened + transformation matrix) and the number of iterations. This might be + useful in case one wants to examine or store the transformation + found after each iteration. + + verbose : int, default=0 + If 0, no progress messages will be printed. + If 1, progress messages will be printed to stdout. + If > 1, progress messages will be printed and the `disp` + parameter of :func:`scipy.optimize.minimize` will be set to + `verbose - 2`. + + random_state : int or numpy.RandomState, default=None + A pseudo random number generator object or a seed for it if int. If + `init='random'`, `random_state` is used to initialize the random + transformation. If `init='pca'`, `random_state` is passed as an + argument to PCA when initializing the transformation. Pass an int + for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + The linear transformation learned during fitting. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + n_iter_ : int + Counts the number of iterations performed by the optimizer. + + random_state_ : numpy.RandomState + Pseudo random number generator object used during initialization. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.discriminant_analysis.LinearDiscriminantAnalysis : Linear + Discriminant Analysis. + sklearn.decomposition.PCA : Principal component analysis (PCA). + + References + ---------- + .. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov. + "Neighbourhood Components Analysis". Advances in Neural Information + Processing Systems. 17, 513-520, 2005. + http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf + + .. 
[2] Wikipedia entry on Neighborhood Components Analysis + https://en.wikipedia.org/wiki/Neighbourhood_components_analysis + + Examples + -------- + >>> from sklearn.neighbors import NeighborhoodComponentsAnalysis + >>> from sklearn.neighbors import KNeighborsClassifier + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import train_test_split + >>> X, y = load_iris(return_X_y=True) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + ... stratify=y, test_size=0.7, random_state=42) + >>> nca = NeighborhoodComponentsAnalysis(random_state=42) + >>> nca.fit(X_train, y_train) + NeighborhoodComponentsAnalysis(...) + >>> knn = KNeighborsClassifier(n_neighbors=3) + >>> knn.fit(X_train, y_train) + KNeighborsClassifier(...) + >>> print(knn.score(X_test, y_test)) + 0.933333... + >>> knn.fit(nca.transform(X_train), y_train) + KNeighborsClassifier(...) + >>> print(knn.score(nca.transform(X_test), y_test)) + 0.961904... + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + None, + ], + "init": [ + StrOptions({"auto", "pca", "lda", "identity", "random"}), + np.ndarray, + ], + "warm_start": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "callback": [callable, None], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + def __init__( + self, + n_components=None, + *, + init="auto", + warm_start=False, + max_iter=50, + tol=1e-5, + callback=None, + verbose=0, + random_state=None, + ): + self.n_components = n_components + self.init = init + self.warm_start = warm_start + self.max_iter = max_iter + self.tol = tol + self.callback = callback + self.verbose = verbose + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model according to the given training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training samples. + + y : array-like of shape (n_samples,) + The corresponding training labels. + + Returns + ------- + self : object + Fitted estimator. + """ + # Validate the inputs X and y, and converts y to numerical classes. + X, y = self._validate_data(X, y, ensure_min_samples=2) + check_classification_targets(y) + y = LabelEncoder().fit_transform(y) + + # Check the preferred dimensionality of the projected space + if self.n_components is not None and self.n_components > X.shape[1]: + raise ValueError( + "The preferred dimensionality of the " + f"projected space `n_components` ({self.n_components}) cannot " + "be greater than the given data " + f"dimensionality ({X.shape[1]})!" + ) + # If warm_start is enabled, check that the inputs are consistent + if ( + self.warm_start + and hasattr(self, "components_") + and self.components_.shape[1] != X.shape[1] + ): + raise ValueError( + f"The new inputs dimensionality ({X.shape[1]}) does not " + "match the input dimensionality of the " + f"previously learned transformation ({self.components_.shape[1]})." + ) + # Check how the linear transformation should be initialized + init = self.init + if isinstance(init, np.ndarray): + init = check_array(init) + # Assert that init.shape[1] = X.shape[1] + if init.shape[1] != X.shape[1]: + raise ValueError( + f"The input dimensionality ({init.shape[1]}) of the given " + "linear transformation `init` must match the " + f"dimensionality of the given inputs `X` ({X.shape[1]})." 
+ ) + # Assert that init.shape[0] <= init.shape[1] + if init.shape[0] > init.shape[1]: + raise ValueError( + f"The output dimensionality ({init.shape[0]}) of the given " + "linear transformation `init` cannot be " + f"greater than its input dimensionality ({init.shape[1]})." + ) + # Assert that self.n_components = init.shape[0] + if self.n_components is not None and self.n_components != init.shape[0]: + raise ValueError( + "The preferred dimensionality of the " + f"projected space `n_components` ({self.n_components}) does" + " not match the output dimensionality of " + "the given linear transformation " + f"`init` ({init.shape[0]})!" + ) + + # Initialize the random generator + self.random_state_ = check_random_state(self.random_state) + + # Measure the total training time + t_train = time.time() + + # Compute a mask that stays fixed during optimization: + same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] + # (n_samples, n_samples) + + # Initialize the transformation + transformation = np.ravel(self._initialize(X, y, init)) + + # Create a dictionary of parameters to be passed to the optimizer + disp = self.verbose - 2 if self.verbose > 1 else -1 + optimizer_params = { + "method": "L-BFGS-B", + "fun": self._loss_grad_lbfgs, + "args": (X, same_class_mask, -1.0), + "jac": True, + "x0": transformation, + "tol": self.tol, + "options": dict(maxiter=self.max_iter, disp=disp), + "callback": self._callback, + } + + # Call the optimizer + self.n_iter_ = 0 + opt_result = minimize(**optimizer_params) + + # Reshape the solution found by the optimizer + self.components_ = opt_result.x.reshape(-1, X.shape[1]) + self._n_features_out = self.components_.shape[1] + + # Stop timer + t_train = time.time() - t_train + if self.verbose: + cls_name = self.__class__.__name__ + + # Warn the user if the algorithm did not converge + if not opt_result.success: + warn( + "[{}] NCA did not converge: {}".format( + cls_name, opt_result.message + ), + ConvergenceWarning, + ) + + print("[{}] Training took {:8.2f}s.".format(cls_name, t_train)) + + return self + + def transform(self, X): + """Apply the learned transformation to the given data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data samples. + + Returns + ------- + X_embedded: ndarray of shape (n_samples, n_components) + The data samples transformed. + + Raises + ------ + NotFittedError + If :meth:`fit` has not been called before. + """ + + check_is_fitted(self) + X = self._validate_data(X, reset=False) + + return np.dot(X, self.components_.T) + + def _initialize(self, X, y, init): + """Initialize the transformation. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The training samples. + + y : array-like of shape (n_samples,) + The training labels. + + init : str or ndarray of shape (n_features_a, n_features_b) + The validated initialization of the linear transformation. + + Returns + ------- + transformation : ndarray of shape (n_components, n_features) + The initialized linear transformation. 
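        Editor's note (added): the precedence implemented below is a
        warm-started `components_` first, then a user-supplied array, then
        the string options ('auto' resolves to 'lda', 'pca' or 'identity'
        depending on `n_components`, `n_classes` and `n_samples`).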
+ + """ + + transformation = init + if self.warm_start and hasattr(self, "components_"): + transformation = self.components_ + elif isinstance(init, np.ndarray): + pass + else: + n_samples, n_features = X.shape + n_components = self.n_components or n_features + if init == "auto": + n_classes = len(np.unique(y)) + if n_components <= min(n_features, n_classes - 1): + init = "lda" + elif n_components < min(n_features, n_samples): + init = "pca" + else: + init = "identity" + if init == "identity": + transformation = np.eye(n_components, X.shape[1]) + elif init == "random": + transformation = self.random_state_.standard_normal( + size=(n_components, X.shape[1]) + ) + elif init in {"pca", "lda"}: + init_time = time.time() + if init == "pca": + pca = PCA( + n_components=n_components, random_state=self.random_state_ + ) + if self.verbose: + print("Finding principal components... ", end="") + sys.stdout.flush() + pca.fit(X) + transformation = pca.components_ + elif init == "lda": + from ..discriminant_analysis import LinearDiscriminantAnalysis + + lda = LinearDiscriminantAnalysis(n_components=n_components) + if self.verbose: + print("Finding most discriminative components... ", end="") + sys.stdout.flush() + lda.fit(X, y) + transformation = lda.scalings_.T[:n_components] + if self.verbose: + print("done in {:5.2f}s".format(time.time() - init_time)) + return transformation + + def _callback(self, transformation): + """Called after each iteration of the optimizer. + + Parameters + ---------- + transformation : ndarray of shape (n_components * n_features,) + The solution computed by the optimizer in this iteration. + """ + if self.callback is not None: + self.callback(transformation, self.n_iter_) + + self.n_iter_ += 1 + + def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0): + """Compute the loss and the loss gradient w.r.t. `transformation`. + + Parameters + ---------- + transformation : ndarray of shape (n_components * n_features,) + The raveled linear transformation on which to compute loss and + evaluate gradient. + + X : ndarray of shape (n_samples, n_features) + The training samples. + + same_class_mask : ndarray of shape (n_samples, n_samples) + A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong + to the same class, and `0` otherwise. + + Returns + ------- + loss : float + The loss computed for the given transformation. + + gradient : ndarray of shape (n_components * n_features,) + The new (flattened) gradient of the loss. + """ + + if self.n_iter_ == 0: + self.n_iter_ += 1 + if self.verbose: + header_fields = ["Iteration", "Objective Value", "Time(s)"] + header_fmt = "{:>10} {:>20} {:>10}" + header = header_fmt.format(*header_fields) + cls_name = self.__class__.__name__ + print("[{}]".format(cls_name)) + print( + "[{}] {}\n[{}] {}".format( + cls_name, header, cls_name, "-" * len(header) + ) + ) + + t_funcall = time.time() + + transformation = transformation.reshape(-1, X.shape[1]) + X_embedded = np.dot(X, transformation.T) # (n_samples, n_components) + + # Compute softmax distances + p_ij = pairwise_distances(X_embedded, squared=True) + np.fill_diagonal(p_ij, np.inf) + p_ij = softmax(-p_ij) # (n_samples, n_samples) + + # Compute loss + masked_p_ij = p_ij * same_class_mask + p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1) + loss = np.sum(p) + + # Compute gradient of loss w.r.t. 
`transform` + weighted_p_ij = masked_p_ij - p_ij * p + weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T + np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0)) + gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X) + # time complexity of the gradient: O(n_components x n_samples x ( + # n_samples + n_features)) + + if self.verbose: + t_funcall = time.time() - t_funcall + values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}" + print( + values_fmt.format( + self.__class__.__name__, self.n_iter_, loss, t_funcall + ) + ) + sys.stdout.flush() + + return sign * loss, sign * gradient.ravel() + + def _more_tags(self): + return {"requires_y": True} diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f165f2e20997b5f5d416826f23d5941178aaca03 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a607ac71046e32c08d9d7933c9fa5df681118153 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_regression.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..2897c1ce409e8bb0733ff98df4dce11de5d3a256 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_regression.py @@ -0,0 +1,510 @@ +"""Nearest Neighbor Regression.""" + +# Authors: Jake Vanderplas +# Fabian Pedregosa +# Alexandre Gramfort +# Sparseness support by Lars Buitinck +# Multi-output support by Arnaud Joly +# Empty radius support by Andreas Bjerre-Nielsen +# +# License: BSD 3 clause (C) INRIA, University of Amsterdam, +# University of Copenhagen + +import warnings + +import numpy as np + +from ..base import RegressorMixin, _fit_context +from ..metrics import DistanceMetric +from ..utils._param_validation import StrOptions +from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights + + +class KNeighborsRegressor(KNeighborsMixin, RegressorMixin, NeighborsBase): + """Regression based on k-nearest neighbors. + + The target is predicted by local interpolation of the targets + associated of the nearest neighbors in the training set. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to use by default for :meth:`kneighbors` queries. + + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + in this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. + - [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. 
+ + Uniform weights are used by default. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric : str, DistanceMetric object or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + If metric is a DistanceMetric object, it will be passed directly to + the underlying computation routines. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + Doesn't affect :meth:`fit` method. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric to use. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + NearestNeighbors : Unsupervised learner for implementing neighbor searches. + RadiusNeighborsRegressor : Regression based on neighbors within a fixed radius. + KNeighborsClassifier : Classifier implementing the k-nearest neighbors vote. + RadiusNeighborsClassifier : Classifier implementing + a vote among neighbors within a given radius. 
+ + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + .. warning:: + + Regarding the Nearest Neighbors algorithms, if it is found that two + neighbors, neighbor `k+1` and `k`, have identical distances but + different labels, the results will depend on the ordering of the + training data. + + https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import KNeighborsRegressor + >>> neigh = KNeighborsRegressor(n_neighbors=2) + >>> neigh.fit(X, y) + KNeighborsRegressor(...) + >>> print(neigh.predict([[1.5]])) + [0.5] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "weights": [StrOptions({"uniform", "distance"}), callable, None], + } + _parameter_constraints["metric"].append(DistanceMetric) + _parameter_constraints.pop("radius") + + def __init__( + self, + n_neighbors=5, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + metric_params=None, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + + def _more_tags(self): + # For cross-validation routines to split data correctly + return {"pairwise": self.metric == "precomputed"} + + @_fit_context( + # KNeighborsRegressor.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the k-nearest neighbors regressor from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : KNeighborsRegressor + The fitted k-nearest neighbors regressor. + """ + return self._fit(X, y) + + def predict(self, X): + """Predict the target for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double + Target values. + """ + if self.weights == "uniform": + # In that case, we do not need the distances to perform + # the weighting so we do not compute them. + neigh_ind = self.kneighbors(X, return_distance=False) + neigh_dist = None + else: + neigh_dist, neigh_ind = self.kneighbors(X) + + weights = _get_weights(neigh_dist, self.weights) + + _y = self._y + if _y.ndim == 1: + _y = _y.reshape((-1, 1)) + + if weights is None: + y_pred = np.mean(_y[neigh_ind], axis=1) + else: + y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64) + denom = np.sum(weights, axis=1) + + for j in range(_y.shape[1]): + num = np.sum(_y[neigh_ind, j] * weights, axis=1) + y_pred[:, j] = num / denom + + if self._y.ndim == 1: + y_pred = y_pred.ravel() + + return y_pred + + +class RadiusNeighborsRegressor(RadiusNeighborsMixin, RegressorMixin, NeighborsBase): + """Regression based on neighbors within a fixed radius. + + The target is predicted by local interpolation of the targets + associated with the nearest neighbors in the training set. + + Read more in the :ref:`User Guide `. +
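# [Editor's note] A short usage sketch with only the public API shown in this
# file: RadiusNeighborsRegressor averages the targets of all training points
# within `radius`, and (per the predict code below) returns NaN with a
# "predicting NaN" UserWarning when a query has no neighbors in range.
from sklearn.neighbors import RadiusNeighborsRegressor

X = [[0], [1], [2], [3]]
y = [0.0, 0.0, 1.0, 1.0]
reg = RadiusNeighborsRegressor(radius=1.0).fit(X, y)
print(reg.predict([[1.5]]))   # [0.5]: mean of the targets at X=1 and X=2
print(reg.predict([[10.0]]))  # [nan], with the empty-neighborhood warning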
+ .. versionadded:: 0.9 + + Parameters + ---------- + radius : float, default=1.0 + Range of parameter space to use by default for :meth:`radius_neighbors` + queries. + + weights : {'uniform', 'distance'}, callable or None, default='uniform' + Weight function used in prediction. Possible values: + + - 'uniform' : uniform weights. All points in each neighborhood + are weighted equally. + - 'distance' : weight points by the inverse of their distance. + In this case, closer neighbors of a query point will have a + greater influence than neighbors which are further away. + - [callable] : a user-defined function which accepts an + array of distances, and returns an array of the same shape + containing the weights. + + Uniform weights are used by default. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric to use. It will be the same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to + 'minkowski' and the `p` parameter is set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + it will be the same as the `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + ..
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + NearestNeighbors : Unsupervised learner for implementing neighbor searches. + KNeighborsRegressor : Regression based on k-nearest neighbors. + KNeighborsClassifier : Classifier based on the k-nearest neighbors. + RadiusNeighborsClassifier : Classifier based on neighbors within a given radius. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm + + Examples + -------- + >>> X = [[0], [1], [2], [3]] + >>> y = [0, 0, 1, 1] + >>> from sklearn.neighbors import RadiusNeighborsRegressor + >>> neigh = RadiusNeighborsRegressor(radius=1.0) + >>> neigh.fit(X, y) + RadiusNeighborsRegressor(...) + >>> print(neigh.predict([[1.5]])) + [0.5] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "weights": [StrOptions({"uniform", "distance"}), callable, None], + } + _parameter_constraints.pop("n_neighbors") + + def __init__( + self, + radius=1.0, + *, + weights="uniform", + algorithm="auto", + leaf_size=30, + p=2, + metric="minkowski", + metric_params=None, + n_jobs=None, + ): + super().__init__( + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + p=p, + metric=metric, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.weights = weights + + @_fit_context( + # RadiusNeighborsRegressor.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the radius neighbors regressor from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : {array-like, sparse matrix} of shape (n_samples,) or \ + (n_samples, n_outputs) + Target values. + + Returns + ------- + self : RadiusNeighborsRegressor + The fitted radius neighbors regressor. + """ + return self._fit(X, y) + + def predict(self, X): + """Predict the target for the provided data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_queries, n_features), \ + or (n_queries, n_indexed) if metric == 'precomputed' + Test samples. + + Returns + ------- + y : ndarray of shape (n_queries,) or (n_queries, n_outputs), \ + dtype=double + Target values. + """ + neigh_dist, neigh_ind = self.radius_neighbors(X) + + weights = _get_weights(neigh_dist, self.weights) + + _y = self._y + if _y.ndim == 1: + _y = _y.reshape((-1, 1)) + + empty_obs = np.full_like(_y[0], np.nan) + + if weights is None: + y_pred = np.array( + [ + np.mean(_y[ind, :], axis=0) if len(ind) else empty_obs + for (i, ind) in enumerate(neigh_ind) + ] + ) + + else: + y_pred = np.array( + [ + ( + np.average(_y[ind, :], axis=0, weights=weights[i]) + if len(ind) + else empty_obs + ) + for (i, ind) in enumerate(neigh_ind) + ] + ) + + if np.any(np.isnan(y_pred)): + empty_warning_msg = ( + "One or more samples have no neighbors " + "within specified radius; predicting NaN." 
+ ) + warnings.warn(empty_warning_msg) + + if self._y.ndim == 1: + y_pred = y_pred.ravel() + + return y_pred diff --git a/venv/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py b/venv/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py new file mode 100644 index 0000000000000000000000000000000000000000..a4ff66786340acc7379c135f8c51c27e41142744 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py @@ -0,0 +1,175 @@ +"""Unsupervised nearest neighbors learner""" +from ..base import _fit_context +from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin + + +class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase): + """Unsupervised learner for implementing neighbor searches. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to use by default for :meth:`kneighbors` queries. + + radius : float, default=1.0 + Range of parameter space to use by default for :meth:`radius_neighbors` + queries. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + p : float (positive), default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + effective_metric_ : str + Metric used to compute distances to neighbors. + + effective_metric_params_ : dict + Parameters for the metric used to compute distances to neighbors. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + KNeighborsClassifier : Classifier implementing the k-nearest neighbors + vote. + RadiusNeighborsClassifier : Classifier implementing a vote among neighbors + within a given radius. + KNeighborsRegressor : Regression based on k-nearest neighbors. + RadiusNeighborsRegressor : Regression based on neighbors within a fixed + radius. + BallTree : Space partitioning data structure for organizing points in a + multi-dimensional space, used for nearest neighbor search. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm + + Examples + -------- + >>> import numpy as np + >>> from sklearn.neighbors import NearestNeighbors + >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]] + >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4) + >>> neigh.fit(samples) + NearestNeighbors(...) + >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False) + array([[2, 0]]...) + >>> nbrs = neigh.radius_neighbors( + ... [[0, 0, 1.3]], 0.4, return_distance=False + ... ) + >>> np.asarray(nbrs[0][0]) + array(2) + """ + + def __init__( + self, + *, + n_neighbors=5, + radius=1.0, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + + @_fit_context( + # NearestNeighbors.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the nearest neighbors estimator from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : NearestNeighbors + The fitted nearest neighbors estimator. 
+ """ + return self._fit(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe2e8915c4f45982d7dc45e2d06ef16340e5e676 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..751a5b361b3a898ec51883541cb3ce05905be42f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fca5d50afc48c8e4a903e6042f10f2048a1e8946 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51d33e944d5921629e5ff5a4c59a4035d2ac67a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4ae6af7942b75f7f9ac69ba394f41090dc31fcd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fff50cf0a1860779c4cf498fae80b0d21eab327d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e63d1a3951c0dc2135e2cb70efeb2c3f080140bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0885263462c336f514d8ddc72a4e1315ff0497e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa89fbd69d5199ef9ee79c42942ba4a82dacb2a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b14a7474a47db73e9493349b92bcf1f5d30ca2f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f45fbde84185bb858e9eeeb5bf267776e7aa04ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ac3f0989984d6c24fd2b2052631fbeeae0d283d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0fc881185cf54921e20d43bc25a1bde20095ea7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64811d9290c8b1c19957fb2fbcf5cb7c59c54e11 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72b8ddd5a608424dd33fa3fc224dc35f4e274266 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f02aaf78b829d458e8042041f4158796d39a47f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a6625eba77c652d3885938bbd89a9343bb367d1b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8ed477c384a1dadd1611026d8646e208478c52f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0a12760a43322b587095b8b846257440ad0ad51 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90b9782bc1571401b7c8b74415920f4e4a75622d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8145e96fe7a765282426a8a6ffdb281dd2163066 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d78be23879a565f5b19eb7fe526918801eda7bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d90e855528df6f52ecf953f7bf942812ba96c37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bd26a7cb71016fac4947e73755c9aa339c7cf53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd38deddc17fb5a0cf599454f24a13b0a5a3c065 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29effc20923244b916a402b7fd44626b7c2b5f31 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2f8ed3bfc1179f350f131c07b0ec0263015d491 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8e44683c75080a37dea782eaf813a4c9c6d14f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0975e90a62dcf4b2057fc70b85a16d4577b6120d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b81fa48b87dd455e0743a1b3cb0a9db05f04ac8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe3c63d56dfdcd043784c4fab43cc5f0dfe10a92 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05d32c3ee203bdba24be3bcabe56e963b906859f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36913066f0178b09f4ca5310182ae5c12fd4116b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec5cbb1b3d55f66a36ac9a28a6b61a556ab521eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5a90a116b0cb07da5f0b0dc9daa3a16c86a55a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7023734e2aaaa6a7be91f48e21450ad2db7fd88f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98f2ef531754eb8be4f92553e6e2e94158e0bcc9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63cb5c96fcb7e160d64e3ae5597812276a588ecd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f6cc94079dcd062a7a8f21b9d6a0c08d7e8bca6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5482913aeff8dceeae63b55d878be03f13a4e95d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f42228e9a5a7d4724c4ec902e153b497165b213f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f85dcb255d8db9c5e135314f99a65c0426662638 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a898a9ffc7142b57689950f90ae655240e4548ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2408cd4b6e61ce7b13d0415a94d3ef8b658945a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db7107f45baa98cc77353a7422f64dcdd35f611e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..480d9f0deaff75d2527991b45af0af0842bfd5c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dec4e92525dfee5bcb01ac2a7e241ac65f341098 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39e10e6090d320f7bbdf8a6b9ba9d32fe054024c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3984cd0e8af1e36e2688fcdfdf34a72e4ac98e72 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e1a3c43fd76dcca61da15e74fb84efa1bdbd304 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13a8382f4506c7c7a21aec85caf35546aa7ec1d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd7248fd6f04c9499be95304c9df7bb4e59858b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6269ed5ff9e5492b3d960828160b4b1d22d0a955 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa9005ba832d7eed53f9c8945ba3204e4f8ce803 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c669c773f04cf56a4e1a9f863512f3a139bfb14f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d63462685d442a79528f3d7ee55cc280675e1673 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ae367198ea204061fc787424be37409b2c731cab Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c4dee332918b3e4b4fd9024caf531592531c1d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..388835b056956d1de41d682ae390b23d899518bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ec0b22be12f0fd05607ee2ba2543664558f34c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0525086b36e2b5126480f262ace3ab009961180 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_typedefs.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_typedefs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6319c18c9e9c0ccc193241658f8b1e69a808ad32 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_typedefs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bc14611577fbd30d8f25217e01007e38ae5d6b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_validation.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aa42bf37f3634199270bedb7efbb8696befcf66 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_validation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_fast_dict.py b/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_fast_dict.py new file mode 100644 index 
0000000000000000000000000000000000000000..8fada45db3f52ca47da44162609b9c23f7222361 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_fast_dict.py @@ -0,0 +1,47 @@ +""" Test fast_dict. +""" +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.utils._fast_dict import IntFloatDict, argmin + + +def test_int_float_dict(): + rng = np.random.RandomState(0) + keys = np.unique(rng.randint(100, size=10).astype(np.intp)) + values = rng.rand(len(keys)) + + d = IntFloatDict(keys, values) + for key, value in zip(keys, values): + assert d[key] == value + assert len(d) == len(keys) + + d.append(120, 3.0) + assert d[120] == 3.0 + assert len(d) == len(keys) + 1 + for i in range(2000): + d.append(i + 1000, 4.0) + assert d[1100] == 4.0 + + +def test_int_float_dict_argmin(): + # Test the argmin implementation on the IntFloatDict + keys = np.arange(100, dtype=np.intp) + values = np.arange(100, dtype=np.float64) + d = IntFloatDict(keys, values) + assert argmin(d) == (0, 0) + + +def test_to_arrays(): + # Test that an IntFloatDict is converted into arrays + # of keys and values correctly + keys_in = np.array([1, 2, 3], dtype=np.intp) + values_in = np.array([4, 5, 6], dtype=np.float64) + + d = IntFloatDict(keys_in, values_in) + keys_out, values_out = d.to_arrays() + + assert keys_out.dtype == keys_in.dtype + assert values_in.dtype == values_out.dtype + assert_array_equal(keys_out, keys_in) + assert_allclose(values_out, values_in) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_random.py b/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_random.py new file mode 100644 index 0000000000000000000000000000000000000000..04a8ee371f358da4a37e400e120730591873dba0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_random.py @@ -0,0 +1,192 @@ +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_array_almost_equal +from scipy.special import comb + +from sklearn.utils._random import _our_rand_r_py +from sklearn.utils.random import _random_choice_csc, sample_without_replacement + + +############################################################################### +# test custom sampling without replacement algorithm +############################################################################### +def test_invalid_sample_without_replacement_algorithm(): + with pytest.raises(ValueError): + sample_without_replacement(5, 4, "unknown") + + +def test_sample_without_replacement_algorithms(): + methods = ("auto", "tracking_selection", "reservoir_sampling", "pool") + + for m in methods: + + def sample_without_replacement_method( + n_population, n_samples, random_state=None + ): + return sample_without_replacement( + n_population, n_samples, method=m, random_state=random_state + ) + + check_edge_case_of_sample_int(sample_without_replacement_method) + check_sample_int(sample_without_replacement_method) + check_sample_int_distribution(sample_without_replacement_method) + + +def check_edge_case_of_sample_int(sample_without_replacement): + # n_population < n_sample + with pytest.raises(ValueError): + sample_without_replacement(0, 1) + with pytest.raises(ValueError): + sample_without_replacement(1, 2) + + # n_population == n_samples + assert sample_without_replacement(0, 0).shape == (0,) + + assert sample_without_replacement(1, 1).shape == (1,) + + # n_population >= n_samples + assert sample_without_replacement(5, 0).shape == (0,) + assert sample_without_replacement(5, 
1).shape == (1,) + + # n_population < 0 or n_samples < 0 + with pytest.raises(ValueError): + sample_without_replacement(-1, 5) + with pytest.raises(ValueError): + sample_without_replacement(5, -1) + + +def check_sample_int(sample_without_replacement): + # This test is heavily inspired by test_random.py of python-core. + # + # For the entire allowable range of 0 <= k <= N, validate that + # the sample is of the correct length and contains only unique items + n_population = 100 + + for n_samples in range(n_population + 1): + s = sample_without_replacement(n_population, n_samples) + assert len(s) == n_samples + unique = np.unique(s) + assert np.size(unique) == n_samples + assert np.all(unique < n_population) + + # test edge case n_population == n_samples == 0 + assert np.size(sample_without_replacement(0, 0)) == 0 + + +def check_sample_int_distribution(sample_without_replacement): + # This test is heavily inspired by test_random.py of python-core. + # + # For the entire allowable range of 0 <= k <= N, validate that + # sample generates all possible permutations + n_population = 10 + + # a large number of trials prevents false negatives without slowing the normal + # case + n_trials = 10000 + + for n_samples in range(n_population): + # Counting the number of combinations is not as good as counting + # the number of permutations. However, it works with sampling algorithms + # that do not provide a random permutation of the subset of integers. + n_expected = comb(n_population, n_samples, exact=True) + + output = {} + for i in range(n_trials): + output[frozenset(sample_without_replacement(n_population, n_samples))] = ( + None + ) + + if len(output) == n_expected: + break + else: + raise AssertionError( + "number of combinations != number of expected (%s != %s)" + % (len(output), n_expected) + ) +
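# [Editor's note] The coverage argument used in check_sample_int_distribution
# above, in miniature: for n_population=5 and n_samples=2 there are C(5, 2)=10
# distinct subsets, so repeated draws should eventually visit all of them.
from scipy.special import comb
from sklearn.utils.random import sample_without_replacement

n_population, n_samples = 5, 2
n_expected = comb(n_population, n_samples, exact=True)  # 10
seen = set()
for _ in range(1000):  # far more trials than the ~30 expected by coupon collecting
    seen.add(frozenset(sample_without_replacement(n_population, n_samples)))
    if len(seen) == n_expected:
        break
assert len(seen) == n_expected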
+def test_random_choice_csc(n_samples=10000, random_state=24): + # Explicit class probabilities + classes = [np.array([0, 1]), np.array([0, 1, 2])] + class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] + + got = _random_choice_csc(n_samples, classes, class_probabilities, random_state) + assert sp.issparse(got) + + for k in range(len(classes)): + p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples) + assert_array_almost_equal(class_probabilities[k], p, decimal=1) + + # Implicit class probabilities + classes = [[0, 1], [1, 2]] # test for array-like support + class_probabilities = [np.array([0.5, 0.5]), np.array([0, 1 / 2, 1 / 2])] + + got = _random_choice_csc( + n_samples=n_samples, classes=classes, random_state=random_state + ) + assert sp.issparse(got) + + for k in range(len(classes)): + p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples) + assert_array_almost_equal(class_probabilities[k], p, decimal=1) + + # Edge case probabilities 1.0 and 0.0 + classes = [np.array([0, 1]), np.array([0, 1, 2])] + class_probabilities = [np.array([0.0, 1.0]), np.array([0.0, 1.0, 0.0])] + + got = _random_choice_csc(n_samples, classes, class_probabilities, random_state) + assert sp.issparse(got) + + for k in range(len(classes)): + p = ( + np.bincount( + got.getcol(k).toarray().ravel(), minlength=len(class_probabilities[k]) + ) + / n_samples + ) + assert_array_almost_equal(class_probabilities[k], p, decimal=1) + + # One class target data + classes = [[1], [0]] # test for array-like support + class_probabilities = [np.array([0.0, 1.0]), np.array([1.0])] + + got = _random_choice_csc( + n_samples=n_samples, classes=classes, random_state=random_state + ) + assert sp.issparse(got) + + for k in range(len(classes)): + p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples + assert_array_almost_equal(class_probabilities[k], p, decimal=1) + + +def test_random_choice_csc_errors(): + # the length of an array in classes and class_probabilities is mismatched + classes = [np.array([0, 1]), np.array([0, 1, 2, 3])] + class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] + with pytest.raises(ValueError): + _random_choice_csc(4, classes, class_probabilities, 1) + + # the class dtype is not supported + classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])] + class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] + with pytest.raises(ValueError): + _random_choice_csc(4, classes, class_probabilities, 1) + + # the class dtype is not supported + classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])] + class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] + with pytest.raises(ValueError): + _random_choice_csc(4, classes, class_probabilities, 1) + + # Given probabilities don't sum to 1 + classes = [np.array([0, 1]), np.array([0, 1, 2])] + class_probabilities = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])] + with pytest.raises(ValueError): + _random_choice_csc(4, classes, class_probabilities, 1) + + +def test_our_rand_r(): + assert 131541053 == _our_rand_r_py(1273642419) + assert 270369 == _our_rand_r_py(0) diff --git a/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_set_output.py b/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_set_output.py new file mode 100644 index 0000000000000000000000000000000000000000..827627f441ddd403f487995e27d9ebc96b4e0fed --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/utils/tests/test_set_output.py @@ -0,0 +1,452 @@ +import importlib +from collections import namedtuple + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn._config import config_context, get_config +from sklearn.preprocessing import StandardScaler +from sklearn.utils._set_output import ( + ADAPTERS_MANAGER, + ContainerAdapterProtocol, + _get_output_config, + _safe_set_output, + _SetOutputMixin, + _wrap_data_with_container, + check_library_installed, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def test_pandas_adapter(): + """Check pandas adapter has expected behavior.""" + pd = pytest.importorskip("pandas") + X_np = np.asarray([[1, 0, 3], [0, 0, 1]]) + columns = np.asarray(["f0", "f1", "f2"], dtype=object) + index = np.asarray([0, 1]) + X_df_orig = pd.DataFrame([[1, 2], [1, 3]], index=index) + + adapter = ADAPTERS_MANAGER.adapters["pandas"] + X_container = adapter.create_container(X_np, X_df_orig, columns=lambda: columns) + assert isinstance(X_container, pd.DataFrame) + assert_array_equal(X_container.columns, columns) + assert_array_equal(X_container.index, index) + + # Input dataframe's index does not change + new_columns = np.asarray(["f0", "f1"], dtype=object) + X_df = pd.DataFrame([[1, 2], [1, 3]], index=[10, 12]) + new_df = adapter.create_container(X_df, X_df_orig, columns=new_columns) + assert_array_equal(new_df.columns, new_columns) + assert_array_equal(new_df.index, X_df.index) + + assert adapter.is_supported_container(X_df) + assert not adapter.is_supported_container(X_np) + + # adapter.rename_columns updates the columns + new_columns = np.array(["a", "c"], dtype=object) + new_df = adapter.rename_columns(X_df, new_columns) + assert_array_equal(new_df.columns, new_columns) +
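# [Editor's note] What the pandas adapter's create_container boils down to,
# conceptually: wrap a raw ndarray in a DataFrame that reuses the original
# input's index. A plain-pandas sketch, not the adapter implementation itself.
import numpy as np
import pandas as pd

X_np = np.asarray([[1, 0, 3], [0, 0, 1]])
X_orig = pd.DataFrame([[1, 2], [1, 3]], index=[10, 12])  # supplies the index
out = pd.DataFrame(X_np, index=X_orig.index, columns=["f0", "f1", "f2"])
print(out.index.tolist())  # [10, 12] -- the original row index is preserved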
+ # adapter.hstack stacks the dataframes horizontally. + X_df_1 = pd.DataFrame([[1, 2, 5], [3, 4, 6]], columns=["a", "b", "e"]) + X_df_2 = pd.DataFrame([[4], [5]], columns=["c"]) + X_stacked = adapter.hstack([X_df_1, X_df_2]) + + expected_df = pd.DataFrame( + [[1, 2, 5, 4], [3, 4, 6, 5]], columns=["a", "b", "e", "c"] + ) + pd.testing.assert_frame_equal(X_stacked, expected_df) + + # check that we properly update the columns even with duplicate column names + # this use-case can potentially happen when using ColumnTransformer + # non-regression test for gh-28260 + X_df = pd.DataFrame([[1, 2], [1, 3]], columns=["a", "a"]) + new_columns = np.array(["x__a", "y__a"], dtype=object) + new_df = adapter.rename_columns(X_df, new_columns) + assert_array_equal(new_df.columns, new_columns) + + # check the behavior of the inplace parameter in `create_container` + # we should trigger a copy + X_df = pd.DataFrame([[1, 2], [1, 3]], index=index) + X_output = adapter.create_container(X_df, X_df, columns=["a", "b"], inplace=False) + assert X_output is not X_df + assert list(X_df.columns) == [0, 1] + assert list(X_output.columns) == ["a", "b"] + + # the operation is inplace + X_df = pd.DataFrame([[1, 2], [1, 3]], index=index) + X_output = adapter.create_container(X_df, X_df, columns=["a", "b"], inplace=True) + assert X_output is X_df + assert list(X_df.columns) == ["a", "b"] + assert list(X_output.columns) == ["a", "b"] + + +def test_polars_adapter(): + """Check Polars adapter has expected behavior.""" + pl = pytest.importorskip("polars") + X_np = np.array([[1, 0, 3], [0, 0, 1]]) + columns = ["f1", "f2", "f3"] + X_df_orig = pl.DataFrame(X_np, schema=columns, orient="row") + + adapter = ADAPTERS_MANAGER.adapters["polars"] + X_container = adapter.create_container(X_np, X_df_orig, columns=lambda: columns) + + assert isinstance(X_container, pl.DataFrame) + assert_array_equal(X_container.columns, columns) + + # Update columns with create_container + new_columns = np.asarray(["a", "b", "c"], dtype=object) + new_df = adapter.create_container(X_df_orig, X_df_orig, columns=new_columns) + assert_array_equal(new_df.columns, new_columns) + + assert adapter.is_supported_container(X_df_orig) + assert not adapter.is_supported_container(X_np) + + # adapter.rename_columns updates the columns + new_columns = np.array(["a", "c", "g"], dtype=object) + new_df = adapter.rename_columns(X_df_orig, new_columns) + assert_array_equal(new_df.columns, new_columns) + + # adapter.hstack stacks the dataframes horizontally.
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test__container_error_validation(csr_container):
+    """Check errors in _wrap_data_with_container."""
+    X = np.asarray([[1, 0, 3], [0, 0, 1]])
+    X_csr = csr_container(X)
+    match = "The transformer outputs a scipy sparse matrix."
+    with config_context(transform_output="pandas"):
+        with pytest.raises(ValueError, match=match):
+            _wrap_data_with_container("transform", X_csr, X, StandardScaler())
+
+
+class EstimatorWithoutSetOutputAndWithoutTransform:
+    pass
+
+
+class EstimatorNoSetOutputWithTransform:
+    def transform(self, X, y=None):
+        return X  # pragma: no cover
+
+
+class EstimatorWithSetOutput(_SetOutputMixin):
+    def fit(self, X, y=None):
+        self.n_features_in_ = X.shape[1]
+        return self
+
+    def transform(self, X, y=None):
+        return X
+
+    def get_feature_names_out(self, input_features=None):
+        return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
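+
+
+# The three classes above cover the cases handled by _safe_set_output below:
+# no transform at all, a transform without the set_output machinery, and a
+# full _SetOutputMixin subclass.  Note that _SetOutputMixin only attaches a
+# set_output method when the subclass defines get_feature_names_out; this is
+# asserted explicitly in test_set_output_mixin further down.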
+
+
+def test__safe_set_output():
+    """Check _safe_set_output works as expected."""
+
+    # An estimator without a transform method does not raise when setting
+    # set_output for "transform"
+    est = EstimatorWithoutSetOutputAndWithoutTransform()
+    _safe_set_output(est, transform="pandas")
+
+    # An estimator with transform but without set_output raises
+    est = EstimatorNoSetOutputWithTransform()
+    with pytest.raises(ValueError, match="Unable to configure output"):
+        _safe_set_output(est, transform="pandas")
+
+    est = EstimatorWithSetOutput().fit(np.asarray([[1, 2, 3]]))
+    _safe_set_output(est, transform="pandas")
+    config = _get_output_config("transform", est)
+    assert config["dense"] == "pandas"
+
+    _safe_set_output(est, transform="default")
+    config = _get_output_config("transform", est)
+    assert config["dense"] == "default"
+
+    # transform=None is a no-op, so the config remains "default"
+    _safe_set_output(est, transform=None)
+    config = _get_output_config("transform", est)
+    assert config["dense"] == "default"
+
+
+class EstimatorNoSetOutputWithTransformNoFeatureNamesOut(_SetOutputMixin):
+    def transform(self, X, y=None):
+        return X  # pragma: no cover
+
+
+def test_set_output_mixin():
+    """An estimator without get_feature_names_out does not define `set_output`."""
+    est = EstimatorNoSetOutputWithTransformNoFeatureNamesOut()
+    assert not hasattr(est, "set_output")
+
+
+def test__safe_set_output_error():
+    """Check that transform fails with an invalid config."""
+    X = np.asarray([[1, 0, 3], [0, 0, 1]])
+
+    est = EstimatorWithSetOutput()
+    _safe_set_output(est, transform="bad")
+
+    msg = "output config must be in"
+    with pytest.raises(ValueError, match=msg):
+        est.transform(X)
+
+
+@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
+def test_set_output_method(dataframe_lib):
+    """Check that the output is a dataframe."""
+    lib = pytest.importorskip(dataframe_lib)
+
+    X = np.asarray([[1, 0, 3], [0, 0, 1]])
+    est = EstimatorWithSetOutput().fit(X)
+
+    # transform=None is a no-op
+    est2 = est.set_output(transform=None)
+    assert est2 is est
+    X_trans_np = est2.transform(X)
+    assert isinstance(X_trans_np, np.ndarray)
+
+    est.set_output(transform=dataframe_lib)
+
+    X_trans_df = est.transform(X)
+    assert isinstance(X_trans_df, lib.DataFrame)
+
+
+def test_set_output_method_error():
+    """Check that transform fails with an invalid `transform` value."""
+    X = np.asarray([[1, 0, 3], [0, 0, 1]])
+    est = EstimatorWithSetOutput().fit(X)
+    est.set_output(transform="bad")
+
+    msg = "output config must be in"
+    with pytest.raises(ValueError, match=msg):
+        est.transform(X)
+
+
+@pytest.mark.parametrize("transform_output", ["pandas", "polars"])
+def test__get_output_config(transform_output):
+    """Check _get_output_config works as expected."""
+
+    # Without a configuration set, the global config is used
+    global_config = get_config()["transform_output"]
+    config = _get_output_config("transform")
+    assert config["dense"] == global_config
+
+    with config_context(transform_output=transform_output):
+        # With estimator=None, the global config is used
+        config = _get_output_config("transform")
+        assert config["dense"] == transform_output
+
+        est = EstimatorNoSetOutputWithTransform()
+        config = _get_output_config("transform", est)
+        assert config["dense"] == transform_output
+
+        est = EstimatorWithSetOutput()
+        # If the estimator has no local config, the global config is used
+        config = _get_output_config("transform", est)
+        assert config["dense"] == transform_output
+
+        # If the estimator has a local config, it takes precedence over the
+        # global config
+        est.set_output(transform="default")
+        config = _get_output_config("transform", est)
+        assert config["dense"] == "default"
+
+        est.set_output(transform=transform_output)
+        config = _get_output_config("transform", est)
+        assert config["dense"] == transform_output
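+
+
+# Precedence in a nutshell (illustrative sketch, not a test): a local
+# set_output(...) call wins over the surrounding config_context:
+#
+#     with config_context(transform_output="pandas"):
+#         est = EstimatorWithSetOutput().fit(X)   # global config -> "pandas"
+#         est.set_output(transform="default")     # local config  -> "default"
+#         X_out = est.transform(X)                # plain NumPy array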
+
+
+class EstimatorWithSetOutputNoAutoWrap(_SetOutputMixin, auto_wrap_output_keys=None):
+    def transform(self, X, y=None):
+        return X
+
+
+def test_get_output_auto_wrap_false():
+    """Check that auto_wrap_output_keys=None does not wrap."""
+    est = EstimatorWithSetOutputNoAutoWrap()
+    assert not hasattr(est, "set_output")
+
+    X = np.asarray([[1, 0, 3], [0, 0, 1]])
+    assert X is est.transform(X)
+
+
+def test_auto_wrap_output_keys_errors_with_incorrect_input():
+    msg = "auto_wrap_output_keys must be None or a tuple of keys."
+    with pytest.raises(ValueError, match=msg):
+
+        class BadEstimator(_SetOutputMixin, auto_wrap_output_keys="bad_parameter"):
+            pass
+
+
+class AnotherMixin:
+    def __init_subclass__(cls, custom_parameter, **kwargs):
+        super().__init_subclass__(**kwargs)
+        cls.custom_parameter = custom_parameter
+
+
+def test_set_output_mixin_custom_mixin():
+    """Check that multiple __init_subclass__ hooks pass their parameters up."""
+
+    class BothMixinEstimator(_SetOutputMixin, AnotherMixin, custom_parameter=123):
+        def transform(self, X, y=None):
+            return X
+
+        def get_feature_names_out(self, input_features=None):
+            return input_features
+
+    est = BothMixinEstimator()
+    assert est.custom_parameter == 123
+    assert hasattr(est, "set_output")
+
+
+def test_set_output_mro():
+    """Check that multiple inheritance resolves to the correct class method.
+
+    Non-regression test for gh-25293.
+    """
+
+    class Base(_SetOutputMixin):
+        def transform(self, X):
+            return "Base"  # noqa
+
+    class A(Base):
+        pass
+
+    class B(Base):
+        def transform(self, X):
+            return "B"
+
+    class C(A, B):
+        pass
+
+    assert C().transform(None) == "B"
+
+
+class EstimatorWithSetOutputIndex(_SetOutputMixin):
+    def fit(self, X, y=None):
+        self.n_features_in_ = X.shape[1]
+        return self
+
+    def transform(self, X, y=None):
+        import pandas as pd
+
+        # transform by giving the output a new index
+        return pd.DataFrame(X.to_numpy(), index=[f"s{i}" for i in range(X.shape[0])])
+
+    def get_feature_names_out(self, input_features=None):
+        return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
+
+
+def test_set_output_pandas_keep_index():
+    """Check that set_output does not override the index set by transform.
+
+    Non-regression test for gh-25730.
+    """
+    pd = pytest.importorskip("pandas")
+
+    X = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=[0, 1])
+    est = EstimatorWithSetOutputIndex().set_output(transform="pandas")
+    est.fit(X)
+
+    X_trans = est.transform(X)
+    assert_array_equal(X_trans.index, ["s0", "s1"])
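+
+
+# The test above pins down a subtle wrapping rule: when transform already
+# returns a dataframe with its own index, the set_output wrapper keeps that
+# index instead of re-applying the input's index (["s0", "s1"] rather than
+# [0, 1] here).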
+ """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=[0, 1]) + est = EstimatorWithSetOutputIndex().set_output(transform="pandas") + est.fit(X) + + X_trans = est.transform(X) + assert_array_equal(X_trans.index, ["s0", "s1"]) + + +class EstimatorReturnTuple(_SetOutputMixin): + def __init__(self, OutputTuple): + self.OutputTuple = OutputTuple + + def transform(self, X, y=None): + return self.OutputTuple(X, 2 * X) + + +def test_set_output_named_tuple_out(): + """Check that namedtuples are kept by default.""" + Output = namedtuple("Output", "X, Y") + X = np.asarray([[1, 2, 3]]) + est = EstimatorReturnTuple(OutputTuple=Output) + X_trans = est.transform(X) + + assert isinstance(X_trans, Output) + assert_array_equal(X_trans.X, X) + assert_array_equal(X_trans.Y, 2 * X) + + +class EstimatorWithListInput(_SetOutputMixin): + def fit(self, X, y=None): + assert isinstance(X, list) + self.n_features_in_ = len(X[0]) + return self + + def transform(self, X, y=None): + return X + + def get_feature_names_out(self, input_features=None): + return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_set_output_list_input(dataframe_lib): + """Check set_output for list input. + + Non-regression test for #27037. + """ + lib = pytest.importorskip(dataframe_lib) + + X = [[0, 1, 2, 3], [4, 5, 6, 7]] + est = EstimatorWithListInput() + est.set_output(transform=dataframe_lib) + + X_out = est.fit(X).transform(X) + assert isinstance(X_out, lib.DataFrame) + assert_array_equal(X_out.columns, ["X0", "X1", "X2", "X3"]) + + +@pytest.mark.parametrize("name", sorted(ADAPTERS_MANAGER.adapters)) +def test_adapter_class_has_interface(name): + """Check adapters have the correct interface.""" + assert isinstance(ADAPTERS_MANAGER.adapters[name], ContainerAdapterProtocol) + + +def test_check_library_installed(monkeypatch): + """Check import error changed.""" + orig_import_module = importlib.import_module + + def patched_import_module(name): + if name == "pandas": + raise ImportError() + orig_import_module(name, package=None) + + monkeypatch.setattr(importlib, "import_module", patched_import_module) + + msg = "Setting output container to 'pandas' requires" + with pytest.raises(ImportError, match=msg): + check_library_installed("pandas")