applied-ai-018 committed
Commit e9bddc1 · verified · 1 Parent(s): a070569

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__init__.py +44 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py +364 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py +1110 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py +868 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py +816 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__init__.py +7 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_halving_search_cv.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_iterative_imputer.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_halving_search_cv.py +32 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_hist_gradient_boosting.py +20 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_iterative_imputer.py +20 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__init__.py +0 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_hist_gradient_boosting.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_iterative_imputer.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_successive_halving.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py +19 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_iterative_imputer.py +51 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_successive_halving.py +53 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__init__.py +42 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so +0 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_base.py +1387 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_classification.py +839 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_graph.py +719 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so +0 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kde.py +365 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_lof.py +516 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nca.py +525 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py +261 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so +0 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd +10 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so +0 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd +92 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_regression.py +510 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py +175 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py +0 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__init__.py ADDED
@@ -0,0 +1,44 @@
+"""
+The :mod:`sklearn.covariance` module includes methods and algorithms to
+robustly estimate the covariance of features given a set of points. The
+precision matrix defined as the inverse of the covariance is also estimated.
+Covariance estimation is closely related to the theory of Gaussian Graphical
+Models.
+"""
+
+from ._elliptic_envelope import EllipticEnvelope
+from ._empirical_covariance import (
+    EmpiricalCovariance,
+    empirical_covariance,
+    log_likelihood,
+)
+from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso
+from ._robust_covariance import MinCovDet, fast_mcd
+from ._shrunk_covariance import (
+    OAS,
+    LedoitWolf,
+    ShrunkCovariance,
+    ledoit_wolf,
+    ledoit_wolf_shrinkage,
+    oas,
+    shrunk_covariance,
+)
+
+__all__ = [
+    "EllipticEnvelope",
+    "EmpiricalCovariance",
+    "GraphicalLasso",
+    "GraphicalLassoCV",
+    "LedoitWolf",
+    "MinCovDet",
+    "OAS",
+    "ShrunkCovariance",
+    "empirical_covariance",
+    "fast_mcd",
+    "graphical_lasso",
+    "ledoit_wolf",
+    "ledoit_wolf_shrinkage",
+    "log_likelihood",
+    "oas",
+    "shrunk_covariance",
+]
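As a quick orientation, here is a minimal usage sketch of the estimators this `__init__.py` re-exports; the toy data, random seed, and printed attributes are illustrative assumptions, not part of the commit above.

# Minimal sketch (illustrative, not part of the diff): fit two estimators
# exported by sklearn.covariance and inspect their fitted attributes.
import numpy as np
from sklearn.covariance import EmpiricalCovariance, LedoitWolf

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)

emp = EmpiricalCovariance().fit(X)    # maximum likelihood estimate
lw = LedoitWolf().fit(X)              # shrunk estimate, usually better conditioned

print(emp.covariance_)                # (2, 2) empirical covariance matrix
print(lw.covariance_, lw.shrinkage_)  # shrunk covariance and the selected shrinkage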
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB)
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc ADDED
Binary file (9.55 kB)
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc ADDED
Binary file (30.5 kB)
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc ADDED
Binary file (24.2 kB)
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc ADDED
Binary file (24.1 kB)
 
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py ADDED
@@ -0,0 +1,364 @@
+"""
+Maximum likelihood covariance estimator.
+
+"""
+
+# Author: Alexandre Gramfort <[email protected]>
+#         Gael Varoquaux <[email protected]>
+#         Virgile Fritsch <[email protected]>
+#
+# License: BSD 3 clause
+
+# avoid division truncation
+import warnings
+
+import numpy as np
+from scipy import linalg
+
+from .. import config_context
+from ..base import BaseEstimator, _fit_context
+from ..metrics.pairwise import pairwise_distances
+from ..utils import check_array
+from ..utils._param_validation import validate_params
+from ..utils.extmath import fast_logdet
+
+
+@validate_params(
+    {
+        "emp_cov": [np.ndarray],
+        "precision": [np.ndarray],
+    },
+    prefer_skip_nested_validation=True,
+)
+def log_likelihood(emp_cov, precision):
+    """Compute the sample mean of the log_likelihood under a covariance model.
+
+    Computes the empirical expected log-likelihood, allowing for universal
+    comparison (beyond this software package), and accounts for normalization
+    terms and scaling.
+
+    Parameters
+    ----------
+    emp_cov : ndarray of shape (n_features, n_features)
+        Maximum Likelihood Estimator of covariance.
+
+    precision : ndarray of shape (n_features, n_features)
+        The precision matrix of the covariance model to be tested.
+
+    Returns
+    -------
+    log_likelihood_ : float
+        Sample mean of the log-likelihood.
+    """
+    p = precision.shape[0]
+    log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision)
+    log_likelihood_ -= p * np.log(2 * np.pi)
+    log_likelihood_ /= 2.0
+    return log_likelihood_
+
+
+@validate_params(
+    {
+        "X": ["array-like"],
+        "assume_centered": ["boolean"],
+    },
+    prefer_skip_nested_validation=True,
+)
+def empirical_covariance(X, *, assume_centered=False):
+    """Compute the Maximum likelihood covariance estimator.
+
+    Parameters
+    ----------
+    X : ndarray of shape (n_samples, n_features)
+        Data from which to compute the covariance estimate.
+
+    assume_centered : bool, default=False
+        If `True`, data will not be centered before computation.
+        Useful when working with data whose mean is almost, but not exactly
+        zero.
+        If `False`, data will be centered before computation.
+
+    Returns
+    -------
+    covariance : ndarray of shape (n_features, n_features)
+        Empirical covariance (Maximum Likelihood Estimator).
+
+    Examples
+    --------
+    >>> from sklearn.covariance import empirical_covariance
+    >>> X = [[1,1,1],[1,1,1],[1,1,1],
+    ...      [0,0,0],[0,0,0],[0,0,0]]
+    >>> empirical_covariance(X)
+    array([[0.25, 0.25, 0.25],
+           [0.25, 0.25, 0.25],
+           [0.25, 0.25, 0.25]])
+    """
+    X = check_array(X, ensure_2d=False, force_all_finite=False)
+
+    if X.ndim == 1:
+        X = np.reshape(X, (1, -1))
+
+    if X.shape[0] == 1:
+        warnings.warn(
+            "Only one sample available. You may want to reshape your data array"
+        )
+
+    if assume_centered:
+        covariance = np.dot(X.T, X) / X.shape[0]
+    else:
+        covariance = np.cov(X.T, bias=1)
+
+    if covariance.ndim == 0:
+        covariance = np.array([[covariance]])
+    return covariance
+
+
+class EmpiricalCovariance(BaseEstimator):
+    """Maximum likelihood covariance estimator.
+
+    Read more in the :ref:`User Guide <covariance>`.
+
+    Parameters
+    ----------
+    store_precision : bool, default=True
+        Specifies if the estimated precision is stored.
+
+    assume_centered : bool, default=False
+        If True, data are not centered before computation.
+        Useful when working with data whose mean is almost, but not exactly
+        zero.
+        If False (default), data are centered before computation.
+
+    Attributes
+    ----------
+    location_ : ndarray of shape (n_features,)
+        Estimated location, i.e. the estimated mean.
+
+    covariance_ : ndarray of shape (n_features, n_features)
+        Estimated covariance matrix
+
+    precision_ : ndarray of shape (n_features, n_features)
+        Estimated pseudo-inverse matrix.
+        (stored only if store_precision is True)
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    EllipticEnvelope : An object for detecting outliers in
+        a Gaussian distributed dataset.
+    GraphicalLasso : Sparse inverse covariance estimation
+        with an l1-penalized estimator.
+    LedoitWolf : LedoitWolf Estimator.
+    MinCovDet : Minimum Covariance Determinant
+        (robust estimator of covariance).
+    OAS : Oracle Approximating Shrinkage Estimator.
+    ShrunkCovariance : Covariance estimator with shrinkage.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.covariance import EmpiricalCovariance
+    >>> from sklearn.datasets import make_gaussian_quantiles
+    >>> real_cov = np.array([[.8, .3],
+    ...                      [.3, .4]])
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.multivariate_normal(mean=[0, 0],
+    ...                             cov=real_cov,
+    ...                             size=500)
+    >>> cov = EmpiricalCovariance().fit(X)
+    >>> cov.covariance_
+    array([[0.7569..., 0.2818...],
+           [0.2818..., 0.3928...]])
+    >>> cov.location_
+    array([0.0622..., 0.0193...])
+    """
+
+    _parameter_constraints: dict = {
+        "store_precision": ["boolean"],
+        "assume_centered": ["boolean"],
+    }
+
+    def __init__(self, *, store_precision=True, assume_centered=False):
+        self.store_precision = store_precision
+        self.assume_centered = assume_centered
+
+    def _set_covariance(self, covariance):
+        """Saves the covariance and precision estimates
+
+        Storage is done accordingly to `self.store_precision`.
+        Precision stored only if invertible.
+
+        Parameters
+        ----------
+        covariance : array-like of shape (n_features, n_features)
+            Estimated covariance matrix to be stored, and from which precision
+            is computed.
+        """
+        covariance = check_array(covariance)
+        # set covariance
+        self.covariance_ = covariance
+        # set precision
+        if self.store_precision:
+            self.precision_ = linalg.pinvh(covariance, check_finite=False)
+        else:
+            self.precision_ = None
+
+    def get_precision(self):
+        """Getter for the precision matrix.
+
+        Returns
+        -------
+        precision_ : array-like of shape (n_features, n_features)
+            The precision matrix associated to the current covariance object.
+        """
+        if self.store_precision:
+            precision = self.precision_
+        else:
+            precision = linalg.pinvh(self.covariance_, check_finite=False)
+        return precision
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y=None):
+        """Fit the maximum likelihood covariance estimator to X.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Training data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        X = self._validate_data(X)
+        if self.assume_centered:
+            self.location_ = np.zeros(X.shape[1])
+        else:
+            self.location_ = X.mean(0)
+        covariance = empirical_covariance(X, assume_centered=self.assume_centered)
+        self._set_covariance(covariance)
+
+        return self
+
+    def score(self, X_test, y=None):
+        """Compute the log-likelihood of `X_test` under the estimated Gaussian model.
+
+        The Gaussian model is defined by its mean and covariance matrix which are
+        represented respectively by `self.location_` and `self.covariance_`.
+
+        Parameters
+        ----------
+        X_test : array-like of shape (n_samples, n_features)
+            Test data of which we compute the likelihood, where `n_samples` is
+            the number of samples and `n_features` is the number of features.
+            `X_test` is assumed to be drawn from the same distribution than
+            the data used in fit (including centering).
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        Returns
+        -------
+        res : float
+            The log-likelihood of `X_test` with `self.location_` and `self.covariance_`
+            as estimators of the Gaussian model mean and covariance matrix respectively.
+        """
+        X_test = self._validate_data(X_test, reset=False)
+        # compute empirical covariance of the test set
+        test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
+        # compute log likelihood
+        res = log_likelihood(test_cov, self.get_precision())
+
+        return res
+
+    def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
+        """Compute the Mean Squared Error between two covariance estimators.
+
+        Parameters
+        ----------
+        comp_cov : array-like of shape (n_features, n_features)
+            The covariance to compare with.
+
+        norm : {"frobenius", "spectral"}, default="frobenius"
+            The type of norm used to compute the error. Available error types:
+            - 'frobenius' (default): sqrt(tr(A^t.A))
+            - 'spectral': sqrt(max(eigenvalues(A^t.A))
+            where A is the error ``(comp_cov - self.covariance_)``.
+
+        scaling : bool, default=True
+            If True (default), the squared error norm is divided by n_features.
+            If False, the squared error norm is not rescaled.
+
+        squared : bool, default=True
+            Whether to compute the squared error norm or the error norm.
+            If True (default), the squared error norm is returned.
+            If False, the error norm is returned.
+
+        Returns
+        -------
+        result : float
+            The Mean Squared Error (in the sense of the Frobenius norm) between
+            `self` and `comp_cov` covariance estimators.
+        """
+        # compute the error
+        error = comp_cov - self.covariance_
+        # compute the error norm
+        if norm == "frobenius":
+            squared_norm = np.sum(error**2)
+        elif norm == "spectral":
+            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
+        else:
+            raise NotImplementedError(
+                "Only spectral and frobenius norms are implemented"
+            )
+        # optionally scale the error norm
+        if scaling:
+            squared_norm = squared_norm / error.shape[0]
+        # finally get either the squared norm or the norm
+        if squared:
+            result = squared_norm
+        else:
+            result = np.sqrt(squared_norm)
+
+        return result
+
+    def mahalanobis(self, X):
+        """Compute the squared Mahalanobis distances of given observations.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            The observations, the Mahalanobis distances of the which we
+            compute. Observations are assumed to be drawn from the same
+            distribution than the data used in fit.
+
+        Returns
+        -------
+        dist : ndarray of shape (n_samples,)
+            Squared Mahalanobis distances of the observations.
+        """
+        X = self._validate_data(X, reset=False)
+
+        precision = self.get_precision()
+        with config_context(assume_finite=True):
+            # compute mahalanobis distances
+            dist = pairwise_distances(
+                X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
+            )
+
+        return np.reshape(dist, (len(X),)) ** 2
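For reference, a short sketch (an illustrative assumption, not taken from the commit) of how the `score` and `mahalanobis` methods defined above are typically called:

# Illustrative only: score() returns the mean log-likelihood of held-out data,
# mahalanobis() returns squared distances under the fitted Gaussian model.
import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(42)
X_train = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, 0.2], [0.2, 0.5]], size=300)
X_test = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, 0.2], [0.2, 0.5]], size=100)

cov = EmpiricalCovariance().fit(X_train)
print(cov.score(X_test))            # mean log-likelihood of X_test (higher is better)
print(cov.mahalanobis(X_test)[:5])  # squared Mahalanobis distances of the first 5 points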
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py ADDED
@@ -0,0 +1,1110 @@
1
+ """GraphicalLasso: sparse inverse covariance estimation with an l1-penalized
2
+ estimator.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <[email protected]>
6
+ # License: BSD 3 clause
7
+ # Copyright: INRIA
8
+ import operator
9
+ import sys
10
+ import time
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import linalg
16
+
17
+ from ..base import _fit_context
18
+ from ..exceptions import ConvergenceWarning
19
+
20
+ # mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'
21
+ from ..linear_model import _cd_fast as cd_fast # type: ignore
22
+ from ..linear_model import lars_path_gram
23
+ from ..model_selection import check_cv, cross_val_score
24
+ from ..utils._param_validation import Interval, StrOptions, validate_params
25
+ from ..utils.metadata_routing import _RoutingNotSupportedMixin
26
+ from ..utils.parallel import Parallel, delayed
27
+ from ..utils.validation import (
28
+ _is_arraylike_not_scalar,
29
+ check_random_state,
30
+ check_scalar,
31
+ )
32
+ from . import EmpiricalCovariance, empirical_covariance, log_likelihood
33
+
34
+
35
+ # Helper functions to compute the objective and dual objective functions
36
+ # of the l1-penalized estimator
37
+ def _objective(mle, precision_, alpha):
38
+ """Evaluation of the graphical-lasso objective function
39
+
40
+ the objective function is made of a shifted scaled version of the
41
+ normalized log-likelihood (i.e. its empirical mean over the samples) and a
42
+ penalisation term to promote sparsity
43
+ """
44
+ p = precision_.shape[0]
45
+ cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
46
+ cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
47
+ return cost
48
+
49
+
50
+ def _dual_gap(emp_cov, precision_, alpha):
51
+ """Expression of the dual gap convergence criterion
52
+
53
+ The specific definition is given in Duchi "Projected Subgradient Methods
54
+ for Learning Sparse Gaussians".
55
+ """
56
+ gap = np.sum(emp_cov * precision_)
57
+ gap -= precision_.shape[0]
58
+ gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
59
+ return gap
60
+
61
+
62
+ # The g-lasso algorithm
63
+ def _graphical_lasso(
64
+ emp_cov,
65
+ alpha,
66
+ *,
67
+ cov_init=None,
68
+ mode="cd",
69
+ tol=1e-4,
70
+ enet_tol=1e-4,
71
+ max_iter=100,
72
+ verbose=False,
73
+ eps=np.finfo(np.float64).eps,
74
+ ):
75
+ _, n_features = emp_cov.shape
76
+ if alpha == 0:
77
+ # Early return without regularization
78
+ precision_ = linalg.inv(emp_cov)
79
+ cost = -2.0 * log_likelihood(emp_cov, precision_)
80
+ cost += n_features * np.log(2 * np.pi)
81
+ d_gap = np.sum(emp_cov * precision_) - n_features
82
+ return emp_cov, precision_, (cost, d_gap), 0
83
+
84
+ if cov_init is None:
85
+ covariance_ = emp_cov.copy()
86
+ else:
87
+ covariance_ = cov_init.copy()
88
+ # As a trivial regularization (Tikhonov like), we scale down the
89
+ # off-diagonal coefficients of our starting point: This is needed, as
90
+ # in the cross-validation the cov_init can easily be
91
+ # ill-conditioned, and the CV loop blows. Beside, this takes
92
+ # conservative stand-point on the initial conditions, and it tends to
93
+ # make the convergence go faster.
94
+ covariance_ *= 0.95
95
+ diagonal = emp_cov.flat[:: n_features + 1]
96
+ covariance_.flat[:: n_features + 1] = diagonal
97
+ precision_ = linalg.pinvh(covariance_)
98
+
99
+ indices = np.arange(n_features)
100
+ i = 0 # initialize the counter to be robust to `max_iter=0`
101
+ costs = list()
102
+ # The different l1 regression solver have different numerical errors
103
+ if mode == "cd":
104
+ errors = dict(over="raise", invalid="ignore")
105
+ else:
106
+ errors = dict(invalid="raise")
107
+ try:
108
+ # be robust to the max_iter=0 edge case, see:
109
+ # https://github.com/scikit-learn/scikit-learn/issues/4134
110
+ d_gap = np.inf
111
+ # set a sub_covariance buffer
112
+ sub_covariance = np.copy(covariance_[1:, 1:], order="C")
113
+ for i in range(max_iter):
114
+ for idx in range(n_features):
115
+ # To keep the contiguous matrix `sub_covariance` equal to
116
+ # covariance_[indices != idx].T[indices != idx]
117
+ # we only need to update 1 column and 1 line when idx changes
118
+ if idx > 0:
119
+ di = idx - 1
120
+ sub_covariance[di] = covariance_[di][indices != idx]
121
+ sub_covariance[:, di] = covariance_[:, di][indices != idx]
122
+ else:
123
+ sub_covariance[:] = covariance_[1:, 1:]
124
+ row = emp_cov[idx, indices != idx]
125
+ with np.errstate(**errors):
126
+ if mode == "cd":
127
+ # Use coordinate descent
128
+ coefs = -(
129
+ precision_[indices != idx, idx]
130
+ / (precision_[idx, idx] + 1000 * eps)
131
+ )
132
+ coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
133
+ coefs,
134
+ alpha,
135
+ 0,
136
+ sub_covariance,
137
+ row,
138
+ row,
139
+ max_iter,
140
+ enet_tol,
141
+ check_random_state(None),
142
+ False,
143
+ )
144
+ else: # mode == "lars"
145
+ _, _, coefs = lars_path_gram(
146
+ Xy=row,
147
+ Gram=sub_covariance,
148
+ n_samples=row.size,
149
+ alpha_min=alpha / (n_features - 1),
150
+ copy_Gram=True,
151
+ eps=eps,
152
+ method="lars",
153
+ return_path=False,
154
+ )
155
+ # Update the precision matrix
156
+ precision_[idx, idx] = 1.0 / (
157
+ covariance_[idx, idx]
158
+ - np.dot(covariance_[indices != idx, idx], coefs)
159
+ )
160
+ precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
161
+ precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
162
+ coefs = np.dot(sub_covariance, coefs)
163
+ covariance_[idx, indices != idx] = coefs
164
+ covariance_[indices != idx, idx] = coefs
165
+ if not np.isfinite(precision_.sum()):
166
+ raise FloatingPointError(
167
+ "The system is too ill-conditioned for this solver"
168
+ )
169
+ d_gap = _dual_gap(emp_cov, precision_, alpha)
170
+ cost = _objective(emp_cov, precision_, alpha)
171
+ if verbose:
172
+ print(
173
+ "[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e"
174
+ % (i, cost, d_gap)
175
+ )
176
+ costs.append((cost, d_gap))
177
+ if np.abs(d_gap) < tol:
178
+ break
179
+ if not np.isfinite(cost) and i > 0:
180
+ raise FloatingPointError(
181
+ "Non SPD result: the system is too ill-conditioned for this solver"
182
+ )
183
+ else:
184
+ warnings.warn(
185
+ "graphical_lasso: did not converge after %i iteration: dual gap: %.3e"
186
+ % (max_iter, d_gap),
187
+ ConvergenceWarning,
188
+ )
189
+ except FloatingPointError as e:
190
+ e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",)
191
+ raise e
192
+
193
+ return covariance_, precision_, costs, i + 1
194
+
195
+
196
+ def alpha_max(emp_cov):
197
+ """Find the maximum alpha for which there are some non-zeros off-diagonal.
198
+
199
+ Parameters
200
+ ----------
201
+ emp_cov : ndarray of shape (n_features, n_features)
202
+ The sample covariance matrix.
203
+
204
+ Notes
205
+ -----
206
+ This results from the bound for the all the Lasso that are solved
207
+ in GraphicalLasso: each time, the row of cov corresponds to Xy. As the
208
+ bound for alpha is given by `max(abs(Xy))`, the result follows.
209
+ """
210
+ A = np.copy(emp_cov)
211
+ A.flat[:: A.shape[0] + 1] = 0
212
+ return np.max(np.abs(A))
213
+
214
+
215
+ @validate_params(
216
+ {
217
+ "emp_cov": ["array-like"],
218
+ "cov_init": ["array-like", None],
219
+ "return_costs": ["boolean"],
220
+ "return_n_iter": ["boolean"],
221
+ },
222
+ prefer_skip_nested_validation=False,
223
+ )
224
+ def graphical_lasso(
225
+ emp_cov,
226
+ alpha,
227
+ *,
228
+ cov_init=None,
229
+ mode="cd",
230
+ tol=1e-4,
231
+ enet_tol=1e-4,
232
+ max_iter=100,
233
+ verbose=False,
234
+ return_costs=False,
235
+ eps=np.finfo(np.float64).eps,
236
+ return_n_iter=False,
237
+ ):
238
+ """L1-penalized covariance estimator.
239
+
240
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
241
+
242
+ .. versionchanged:: v0.20
243
+ graph_lasso has been renamed to graphical_lasso
244
+
245
+ Parameters
246
+ ----------
247
+ emp_cov : array-like of shape (n_features, n_features)
248
+ Empirical covariance from which to compute the covariance estimate.
249
+
250
+ alpha : float
251
+ The regularization parameter: the higher alpha, the more
252
+ regularization, the sparser the inverse covariance.
253
+ Range is (0, inf].
254
+
255
+ cov_init : array of shape (n_features, n_features), default=None
256
+ The initial guess for the covariance. If None, then the empirical
257
+ covariance is used.
258
+
259
+ .. deprecated:: 1.3
260
+ `cov_init` is deprecated in 1.3 and will be removed in 1.5.
261
+ It currently has no effect.
262
+
263
+ mode : {'cd', 'lars'}, default='cd'
264
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
265
+ very sparse underlying graphs, where p > n. Elsewhere prefer cd
266
+ which is more numerically stable.
267
+
268
+ tol : float, default=1e-4
269
+ The tolerance to declare convergence: if the dual gap goes below
270
+ this value, iterations are stopped. Range is (0, inf].
271
+
272
+ enet_tol : float, default=1e-4
273
+ The tolerance for the elastic net solver used to calculate the descent
274
+ direction. This parameter controls the accuracy of the search direction
275
+ for a given column update, not of the overall parameter estimate. Only
276
+ used for mode='cd'. Range is (0, inf].
277
+
278
+ max_iter : int, default=100
279
+ The maximum number of iterations.
280
+
281
+ verbose : bool, default=False
282
+ If verbose is True, the objective function and dual gap are
283
+ printed at each iteration.
284
+
285
+ return_costs : bool, default=False
286
+ If return_costs is True, the objective function and dual gap
287
+ at each iteration are returned.
288
+
289
+ eps : float, default=eps
290
+ The machine-precision regularization in the computation of the
291
+ Cholesky diagonal factors. Increase this for very ill-conditioned
292
+ systems. Default is `np.finfo(np.float64).eps`.
293
+
294
+ return_n_iter : bool, default=False
295
+ Whether or not to return the number of iterations.
296
+
297
+ Returns
298
+ -------
299
+ covariance : ndarray of shape (n_features, n_features)
300
+ The estimated covariance matrix.
301
+
302
+ precision : ndarray of shape (n_features, n_features)
303
+ The estimated (sparse) precision matrix.
304
+
305
+ costs : list of (objective, dual_gap) pairs
306
+ The list of values of the objective function and the dual gap at
307
+ each iteration. Returned only if return_costs is True.
308
+
309
+ n_iter : int
310
+ Number of iterations. Returned only if `return_n_iter` is set to True.
311
+
312
+ See Also
313
+ --------
314
+ GraphicalLasso : Sparse inverse covariance estimation
315
+ with an l1-penalized estimator.
316
+ GraphicalLassoCV : Sparse inverse covariance with
317
+ cross-validated choice of the l1 penalty.
318
+
319
+ Notes
320
+ -----
321
+ The algorithm employed to solve this problem is the GLasso algorithm,
322
+ from the Friedman 2008 Biostatistics paper. It is the same algorithm
323
+ as in the R `glasso` package.
324
+
325
+ One possible difference with the `glasso` R package is that the
326
+ diagonal coefficients are not penalized.
327
+
328
+ Examples
329
+ --------
330
+ >>> import numpy as np
331
+ >>> from sklearn.datasets import make_sparse_spd_matrix
332
+ >>> from sklearn.covariance import empirical_covariance, graphical_lasso
333
+ >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42)
334
+ >>> rng = np.random.RandomState(42)
335
+ >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3)
336
+ >>> emp_cov = empirical_covariance(X, assume_centered=True)
337
+ >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05)
338
+ >>> emp_cov
339
+ array([[ 1.68..., 0.21..., -0.20...],
340
+ [ 0.21..., 0.22..., -0.08...],
341
+ [-0.20..., -0.08..., 0.23...]])
342
+ """
343
+
344
+ if cov_init is not None:
345
+ warnings.warn(
346
+ (
347
+ "The cov_init parameter is deprecated in 1.3 and will be removed in "
348
+ "1.5. It does not have any effect."
349
+ ),
350
+ FutureWarning,
351
+ )
352
+
353
+ model = GraphicalLasso(
354
+ alpha=alpha,
355
+ mode=mode,
356
+ covariance="precomputed",
357
+ tol=tol,
358
+ enet_tol=enet_tol,
359
+ max_iter=max_iter,
360
+ verbose=verbose,
361
+ eps=eps,
362
+ assume_centered=True,
363
+ ).fit(emp_cov)
364
+
365
+ output = [model.covariance_, model.precision_]
366
+ if return_costs:
367
+ output.append(model.costs_)
368
+ if return_n_iter:
369
+ output.append(model.n_iter_)
370
+ return tuple(output)
371
+
372
+
373
+ class BaseGraphicalLasso(EmpiricalCovariance):
374
+ _parameter_constraints: dict = {
375
+ **EmpiricalCovariance._parameter_constraints,
376
+ "tol": [Interval(Real, 0, None, closed="right")],
377
+ "enet_tol": [Interval(Real, 0, None, closed="right")],
378
+ "max_iter": [Interval(Integral, 0, None, closed="left")],
379
+ "mode": [StrOptions({"cd", "lars"})],
380
+ "verbose": ["verbose"],
381
+ "eps": [Interval(Real, 0, None, closed="both")],
382
+ }
383
+ _parameter_constraints.pop("store_precision")
384
+
385
+ def __init__(
386
+ self,
387
+ tol=1e-4,
388
+ enet_tol=1e-4,
389
+ max_iter=100,
390
+ mode="cd",
391
+ verbose=False,
392
+ eps=np.finfo(np.float64).eps,
393
+ assume_centered=False,
394
+ ):
395
+ super().__init__(assume_centered=assume_centered)
396
+ self.tol = tol
397
+ self.enet_tol = enet_tol
398
+ self.max_iter = max_iter
399
+ self.mode = mode
400
+ self.verbose = verbose
401
+ self.eps = eps
402
+
403
+
404
+ class GraphicalLasso(BaseGraphicalLasso):
405
+ """Sparse inverse covariance estimation with an l1-penalized estimator.
406
+
407
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
408
+
409
+ .. versionchanged:: v0.20
410
+ GraphLasso has been renamed to GraphicalLasso
411
+
412
+ Parameters
413
+ ----------
414
+ alpha : float, default=0.01
415
+ The regularization parameter: the higher alpha, the more
416
+ regularization, the sparser the inverse covariance.
417
+ Range is (0, inf].
418
+
419
+ mode : {'cd', 'lars'}, default='cd'
420
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
421
+ very sparse underlying graphs, where p > n. Elsewhere prefer cd
422
+ which is more numerically stable.
423
+
424
+ covariance : "precomputed", default=None
425
+ If covariance is "precomputed", the input data in `fit` is assumed
426
+ to be the covariance matrix. If `None`, the empirical covariance
427
+ is estimated from the data `X`.
428
+
429
+ .. versionadded:: 1.3
430
+
431
+ tol : float, default=1e-4
432
+ The tolerance to declare convergence: if the dual gap goes below
433
+ this value, iterations are stopped. Range is (0, inf].
434
+
435
+ enet_tol : float, default=1e-4
436
+ The tolerance for the elastic net solver used to calculate the descent
437
+ direction. This parameter controls the accuracy of the search direction
438
+ for a given column update, not of the overall parameter estimate. Only
439
+ used for mode='cd'. Range is (0, inf].
440
+
441
+ max_iter : int, default=100
442
+ The maximum number of iterations.
443
+
444
+ verbose : bool, default=False
445
+ If verbose is True, the objective function and dual gap are
446
+ plotted at each iteration.
447
+
448
+ eps : float, default=eps
449
+ The machine-precision regularization in the computation of the
450
+ Cholesky diagonal factors. Increase this for very ill-conditioned
451
+ systems. Default is `np.finfo(np.float64).eps`.
452
+
453
+ .. versionadded:: 1.3
454
+
455
+ assume_centered : bool, default=False
456
+ If True, data are not centered before computation.
457
+ Useful when working with data whose mean is almost, but not exactly
458
+ zero.
459
+ If False, data are centered before computation.
460
+
461
+ Attributes
462
+ ----------
463
+ location_ : ndarray of shape (n_features,)
464
+ Estimated location, i.e. the estimated mean.
465
+
466
+ covariance_ : ndarray of shape (n_features, n_features)
467
+ Estimated covariance matrix
468
+
469
+ precision_ : ndarray of shape (n_features, n_features)
470
+ Estimated pseudo inverse matrix.
471
+
472
+ n_iter_ : int
473
+ Number of iterations run.
474
+
475
+ costs_ : list of (objective, dual_gap) pairs
476
+ The list of values of the objective function and the dual gap at
477
+ each iteration. Returned only if return_costs is True.
478
+
479
+ .. versionadded:: 1.3
480
+
481
+ n_features_in_ : int
482
+ Number of features seen during :term:`fit`.
483
+
484
+ .. versionadded:: 0.24
485
+
486
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
487
+ Names of features seen during :term:`fit`. Defined only when `X`
488
+ has feature names that are all strings.
489
+
490
+ .. versionadded:: 1.0
491
+
492
+ See Also
493
+ --------
494
+ graphical_lasso : L1-penalized covariance estimator.
495
+ GraphicalLassoCV : Sparse inverse covariance with
496
+ cross-validated choice of the l1 penalty.
497
+
498
+ Examples
499
+ --------
500
+ >>> import numpy as np
501
+ >>> from sklearn.covariance import GraphicalLasso
502
+ >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
503
+ ... [0.0, 0.4, 0.0, 0.0],
504
+ ... [0.2, 0.0, 0.3, 0.1],
505
+ ... [0.0, 0.0, 0.1, 0.7]])
506
+ >>> np.random.seed(0)
507
+ >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
508
+ ... cov=true_cov,
509
+ ... size=200)
510
+ >>> cov = GraphicalLasso().fit(X)
511
+ >>> np.around(cov.covariance_, decimals=3)
512
+ array([[0.816, 0.049, 0.218, 0.019],
513
+ [0.049, 0.364, 0.017, 0.034],
514
+ [0.218, 0.017, 0.322, 0.093],
515
+ [0.019, 0.034, 0.093, 0.69 ]])
516
+ >>> np.around(cov.location_, decimals=3)
517
+ array([0.073, 0.04 , 0.038, 0.143])
518
+ """
519
+
520
+ _parameter_constraints: dict = {
521
+ **BaseGraphicalLasso._parameter_constraints,
522
+ "alpha": [Interval(Real, 0, None, closed="both")],
523
+ "covariance": [StrOptions({"precomputed"}), None],
524
+ }
525
+
526
+ def __init__(
527
+ self,
528
+ alpha=0.01,
529
+ *,
530
+ mode="cd",
531
+ covariance=None,
532
+ tol=1e-4,
533
+ enet_tol=1e-4,
534
+ max_iter=100,
535
+ verbose=False,
536
+ eps=np.finfo(np.float64).eps,
537
+ assume_centered=False,
538
+ ):
539
+ super().__init__(
540
+ tol=tol,
541
+ enet_tol=enet_tol,
542
+ max_iter=max_iter,
543
+ mode=mode,
544
+ verbose=verbose,
545
+ eps=eps,
546
+ assume_centered=assume_centered,
547
+ )
548
+ self.alpha = alpha
549
+ self.covariance = covariance
550
+
551
+ @_fit_context(prefer_skip_nested_validation=True)
552
+ def fit(self, X, y=None):
553
+ """Fit the GraphicalLasso model to X.
554
+
555
+ Parameters
556
+ ----------
557
+ X : array-like of shape (n_samples, n_features)
558
+ Data from which to compute the covariance estimate.
559
+
560
+ y : Ignored
561
+ Not used, present for API consistency by convention.
562
+
563
+ Returns
564
+ -------
565
+ self : object
566
+ Returns the instance itself.
567
+ """
568
+ # Covariance does not make sense for a single feature
569
+ X = self._validate_data(X, ensure_min_features=2, ensure_min_samples=2)
570
+
571
+ if self.covariance == "precomputed":
572
+ emp_cov = X.copy()
573
+ self.location_ = np.zeros(X.shape[1])
574
+ else:
575
+ emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
576
+ if self.assume_centered:
577
+ self.location_ = np.zeros(X.shape[1])
578
+ else:
579
+ self.location_ = X.mean(0)
580
+
581
+ self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
582
+ emp_cov,
583
+ alpha=self.alpha,
584
+ cov_init=None,
585
+ mode=self.mode,
586
+ tol=self.tol,
587
+ enet_tol=self.enet_tol,
588
+ max_iter=self.max_iter,
589
+ verbose=self.verbose,
590
+ eps=self.eps,
591
+ )
592
+ return self
593
+
594
+
595
+ # Cross-validation with GraphicalLasso
596
+ def graphical_lasso_path(
597
+ X,
598
+ alphas,
599
+ cov_init=None,
600
+ X_test=None,
601
+ mode="cd",
602
+ tol=1e-4,
603
+ enet_tol=1e-4,
604
+ max_iter=100,
605
+ verbose=False,
606
+ eps=np.finfo(np.float64).eps,
607
+ ):
608
+ """l1-penalized covariance estimator along a path of decreasing alphas
609
+
610
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
611
+
612
+ Parameters
613
+ ----------
614
+ X : ndarray of shape (n_samples, n_features)
615
+ Data from which to compute the covariance estimate.
616
+
617
+ alphas : array-like of shape (n_alphas,)
618
+ The list of regularization parameters, decreasing order.
619
+
620
+ cov_init : array of shape (n_features, n_features), default=None
621
+ The initial guess for the covariance.
622
+
623
+ X_test : array of shape (n_test_samples, n_features), default=None
624
+ Optional test matrix to measure generalisation error.
625
+
626
+ mode : {'cd', 'lars'}, default='cd'
627
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
628
+ very sparse underlying graphs, where p > n. Elsewhere prefer cd
629
+ which is more numerically stable.
630
+
631
+ tol : float, default=1e-4
632
+ The tolerance to declare convergence: if the dual gap goes below
633
+ this value, iterations are stopped. The tolerance must be a positive
634
+ number.
635
+
636
+ enet_tol : float, default=1e-4
637
+ The tolerance for the elastic net solver used to calculate the descent
638
+ direction. This parameter controls the accuracy of the search direction
639
+ for a given column update, not of the overall parameter estimate. Only
640
+ used for mode='cd'. The tolerance must be a positive number.
641
+
642
+ max_iter : int, default=100
643
+ The maximum number of iterations. This parameter should be a strictly
644
+ positive integer.
645
+
646
+ verbose : int or bool, default=False
647
+ The higher the verbosity flag, the more information is printed
648
+ during the fitting.
649
+
650
+ eps : float, default=eps
651
+ The machine-precision regularization in the computation of the
652
+ Cholesky diagonal factors. Increase this for very ill-conditioned
653
+ systems. Default is `np.finfo(np.float64).eps`.
654
+
655
+ .. versionadded:: 1.3
656
+
657
+ Returns
658
+ -------
659
+ covariances_ : list of shape (n_alphas,) of ndarray of shape \
660
+ (n_features, n_features)
661
+ The estimated covariance matrices.
662
+
663
+ precisions_ : list of shape (n_alphas,) of ndarray of shape \
664
+ (n_features, n_features)
665
+ The estimated (sparse) precision matrices.
666
+
667
+ scores_ : list of shape (n_alphas,), dtype=float
668
+ The generalisation error (log-likelihood) on the test data.
669
+ Returned only if test data is passed.
670
+ """
671
+ inner_verbose = max(0, verbose - 1)
672
+ emp_cov = empirical_covariance(X)
673
+ if cov_init is None:
674
+ covariance_ = emp_cov.copy()
675
+ else:
676
+ covariance_ = cov_init
677
+ covariances_ = list()
678
+ precisions_ = list()
679
+ scores_ = list()
680
+ if X_test is not None:
681
+ test_emp_cov = empirical_covariance(X_test)
682
+
683
+ for alpha in alphas:
684
+ try:
685
+ # Capture the errors, and move on
686
+ covariance_, precision_, _, _ = _graphical_lasso(
687
+ emp_cov,
688
+ alpha=alpha,
689
+ cov_init=covariance_,
690
+ mode=mode,
691
+ tol=tol,
692
+ enet_tol=enet_tol,
693
+ max_iter=max_iter,
694
+ verbose=inner_verbose,
695
+ eps=eps,
696
+ )
697
+ covariances_.append(covariance_)
698
+ precisions_.append(precision_)
699
+ if X_test is not None:
700
+ this_score = log_likelihood(test_emp_cov, precision_)
701
+ except FloatingPointError:
702
+ this_score = -np.inf
703
+ covariances_.append(np.nan)
704
+ precisions_.append(np.nan)
705
+ if X_test is not None:
706
+ if not np.isfinite(this_score):
707
+ this_score = -np.inf
708
+ scores_.append(this_score)
709
+ if verbose == 1:
710
+ sys.stderr.write(".")
711
+ elif verbose > 1:
712
+ if X_test is not None:
713
+ print(
714
+ "[graphical_lasso_path] alpha: %.2e, score: %.2e"
715
+ % (alpha, this_score)
716
+ )
717
+ else:
718
+ print("[graphical_lasso_path] alpha: %.2e" % alpha)
719
+ if X_test is not None:
720
+ return covariances_, precisions_, scores_
721
+ return covariances_, precisions_
722
+
723
+
724
+ class GraphicalLassoCV(_RoutingNotSupportedMixin, BaseGraphicalLasso):
725
+ """Sparse inverse covariance w/ cross-validated choice of the l1 penalty.
726
+
727
+ See glossary entry for :term:`cross-validation estimator`.
728
+
729
+ Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
730
+
731
+ .. versionchanged:: v0.20
732
+ GraphLassoCV has been renamed to GraphicalLassoCV
733
+
734
+ Parameters
735
+ ----------
736
+ alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
737
+ If an integer is given, it fixes the number of points on the
738
+ grids of alpha to be used. If a list is given, it gives the
739
+ grid to be used. See the notes in the class docstring for
740
+ more details. Range is [1, inf) for an integer.
741
+ Range is (0, inf] for an array-like of floats.
742
+
743
+ n_refinements : int, default=4
744
+ The number of times the grid is refined. Not used if explicit
745
+ values of alphas are passed. Range is [1, inf).
746
+
747
+ cv : int, cross-validation generator or iterable, default=None
748
+ Determines the cross-validation splitting strategy.
749
+ Possible inputs for cv are:
750
+
751
+ - None, to use the default 5-fold cross-validation,
752
+ - integer, to specify the number of folds.
753
+ - :term:`CV splitter`,
754
+ - An iterable yielding (train, test) splits as arrays of indices.
755
+
756
+ For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.
757
+
758
+ Refer :ref:`User Guide <cross_validation>` for the various
759
+ cross-validation strategies that can be used here.
760
+
761
+ .. versionchanged:: 0.20
762
+ ``cv`` default value if None changed from 3-fold to 5-fold.
763
+
764
+ tol : float, default=1e-4
765
+ The tolerance to declare convergence: if the dual gap goes below
766
+ this value, iterations are stopped. Range is (0, inf].
767
+
768
+ enet_tol : float, default=1e-4
769
+ The tolerance for the elastic net solver used to calculate the descent
770
+ direction. This parameter controls the accuracy of the search direction
771
+ for a given column update, not of the overall parameter estimate. Only
772
+ used for mode='cd'. Range is (0, inf].
773
+
774
+ max_iter : int, default=100
775
+ Maximum number of iterations.
776
+
777
+ mode : {'cd', 'lars'}, default='cd'
778
+ The Lasso solver to use: coordinate descent or LARS. Use LARS for
779
+ very sparse underlying graphs, where number of features is greater
780
+ than number of samples. Elsewhere prefer cd which is more numerically
781
+ stable.
782
+
783
+ n_jobs : int, default=None
784
+ Number of jobs to run in parallel.
785
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
786
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
787
+ for more details.
788
+
789
+ .. versionchanged:: v0.20
790
+ `n_jobs` default changed from 1 to None
791
+
792
+ verbose : bool, default=False
793
+ If verbose is True, the objective function and duality gap are
794
+ printed at each iteration.
795
+
796
+ eps : float, default=eps
797
+ The machine-precision regularization in the computation of the
798
+ Cholesky diagonal factors. Increase this for very ill-conditioned
799
+ systems. Default is `np.finfo(np.float64).eps`.
800
+
801
+ .. versionadded:: 1.3
802
+
803
+ assume_centered : bool, default=False
804
+ If True, data are not centered before computation.
805
+ Useful when working with data whose mean is almost, but not exactly
806
+ zero.
807
+ If False, data are centered before computation.
808
+
809
+ Attributes
810
+ ----------
811
+ location_ : ndarray of shape (n_features,)
812
+ Estimated location, i.e. the estimated mean.
813
+
814
+ covariance_ : ndarray of shape (n_features, n_features)
815
+ Estimated covariance matrix.
816
+
817
+ precision_ : ndarray of shape (n_features, n_features)
818
+ Estimated precision matrix (inverse covariance).
819
+
820
+ costs_ : list of (objective, dual_gap) pairs
821
+ The list of values of the objective function and the dual gap at
822
+ each iteration. Returned only if return_costs is True.
823
+
824
+ .. versionadded:: 1.3
825
+
826
+ alpha_ : float
827
+ Penalization parameter selected.
828
+
829
+ cv_results_ : dict of ndarrays
830
+ A dict with keys:
831
+
832
+ alphas : ndarray of shape (n_alphas,)
833
+ All penalization parameters explored.
834
+
835
+ split(k)_test_score : ndarray of shape (n_alphas,)
836
+ Log-likelihood score on left-out data across (k)th fold.
837
+
838
+ .. versionadded:: 1.0
839
+
840
+ mean_test_score : ndarray of shape (n_alphas,)
841
+ Mean of scores over the folds.
842
+
843
+ .. versionadded:: 1.0
844
+
845
+ std_test_score : ndarray of shape (n_alphas,)
846
+ Standard deviation of scores over the folds.
847
+
848
+ .. versionadded:: 1.0
849
+
850
+ n_iter_ : int
851
+ Number of iterations run for the optimal alpha.
852
+
853
+ n_features_in_ : int
854
+ Number of features seen during :term:`fit`.
855
+
856
+ .. versionadded:: 0.24
857
+
858
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
859
+ Names of features seen during :term:`fit`. Defined only when `X`
860
+ has feature names that are all strings.
861
+
862
+ .. versionadded:: 1.0
863
+
864
+ See Also
865
+ --------
866
+ graphical_lasso : L1-penalized covariance estimator.
867
+ GraphicalLasso : Sparse inverse covariance estimation
868
+ with an l1-penalized estimator.
869
+
870
+ Notes
871
+ -----
872
+ The search for the optimal penalization parameter (`alpha`) is done on an
873
+ iteratively refined grid: first the cross-validated scores on a grid are
874
+ computed, then a new refined grid is centered around the maximum, and so
875
+ on.
876
+
877
+ One of the challenges which is faced here is that the solvers can
878
+ fail to converge to a well-conditioned estimate. The corresponding
879
+ values of `alpha` then come out as missing values, but the optimum may
880
+ be close to these missing values.
881
+
882
+ In `fit`, once the best parameter `alpha` is found through
883
+ cross-validation, the model is fit again using the entire training set.
884
+
885
+ Examples
886
+ --------
887
+ >>> import numpy as np
888
+ >>> from sklearn.covariance import GraphicalLassoCV
889
+ >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
890
+ ... [0.0, 0.4, 0.0, 0.0],
891
+ ... [0.2, 0.0, 0.3, 0.1],
892
+ ... [0.0, 0.0, 0.1, 0.7]])
893
+ >>> np.random.seed(0)
894
+ >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
895
+ ... cov=true_cov,
896
+ ... size=200)
897
+ >>> cov = GraphicalLassoCV().fit(X)
898
+ >>> np.around(cov.covariance_, decimals=3)
899
+ array([[0.816, 0.051, 0.22 , 0.017],
900
+ [0.051, 0.364, 0.018, 0.036],
901
+ [0.22 , 0.018, 0.322, 0.094],
902
+ [0.017, 0.036, 0.094, 0.69 ]])
903
+ >>> np.around(cov.location_, decimals=3)
904
+ array([0.073, 0.04 , 0.038, 0.143])
905
+ """
906
+
907
+ _parameter_constraints: dict = {
908
+ **BaseGraphicalLasso._parameter_constraints,
909
+ "alphas": [Interval(Integral, 0, None, closed="left"), "array-like"],
910
+ "n_refinements": [Interval(Integral, 1, None, closed="left")],
911
+ "cv": ["cv_object"],
912
+ "n_jobs": [Integral, None],
913
+ }
914
+
915
+ def __init__(
916
+ self,
917
+ *,
918
+ alphas=4,
919
+ n_refinements=4,
920
+ cv=None,
921
+ tol=1e-4,
922
+ enet_tol=1e-4,
923
+ max_iter=100,
924
+ mode="cd",
925
+ n_jobs=None,
926
+ verbose=False,
927
+ eps=np.finfo(np.float64).eps,
928
+ assume_centered=False,
929
+ ):
930
+ super().__init__(
931
+ tol=tol,
932
+ enet_tol=enet_tol,
933
+ max_iter=max_iter,
934
+ mode=mode,
935
+ verbose=verbose,
936
+ eps=eps,
937
+ assume_centered=assume_centered,
938
+ )
939
+ self.alphas = alphas
940
+ self.n_refinements = n_refinements
941
+ self.cv = cv
942
+ self.n_jobs = n_jobs
943
+
944
+ @_fit_context(prefer_skip_nested_validation=True)
945
+ def fit(self, X, y=None):
946
+ """Fit the GraphicalLasso covariance model to X.
947
+
948
+ Parameters
949
+ ----------
950
+ X : array-like of shape (n_samples, n_features)
951
+ Data from which to compute the covariance estimate.
952
+
953
+ y : Ignored
954
+ Not used, present for API consistency by convention.
955
+
956
+ Returns
957
+ -------
958
+ self : object
959
+ Returns the instance itself.
960
+ """
961
+ # Covariance does not make sense for a single feature
962
+ X = self._validate_data(X, ensure_min_features=2)
963
+ if self.assume_centered:
964
+ self.location_ = np.zeros(X.shape[1])
965
+ else:
966
+ self.location_ = X.mean(0)
967
+ emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
968
+
969
+ cv = check_cv(self.cv, y, classifier=False)
970
+
971
+ # List of (alpha, scores, covs)
972
+ path = list()
973
+ n_alphas = self.alphas
974
+ inner_verbose = max(0, self.verbose - 1)
975
+
976
+ if _is_arraylike_not_scalar(n_alphas):
977
+ for alpha in self.alphas:
978
+ check_scalar(
979
+ alpha,
980
+ "alpha",
981
+ Real,
982
+ min_val=0,
983
+ max_val=np.inf,
984
+ include_boundaries="right",
985
+ )
986
+ alphas = self.alphas
987
+ n_refinements = 1
988
+ else:
989
+ n_refinements = self.n_refinements
990
+ alpha_1 = alpha_max(emp_cov)
991
+ alpha_0 = 1e-2 * alpha_1
992
+ alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]
993
+
994
+ t0 = time.time()
995
+ for i in range(n_refinements):
996
+ with warnings.catch_warnings():
997
+ # No need to see the convergence warnings on this grid:
998
+ # they will always be points that will not converge
999
+ # during the cross-validation
1000
+ warnings.simplefilter("ignore", ConvergenceWarning)
1001
+ # Compute the cross-validated loss on the current grid
1002
+
1003
+ # NOTE: Warm-restarting graphical_lasso_path has been tried,
1004
+ # and this did not allow to gain anything
1005
+ # (same execution time with or without).
1006
+ this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
1007
+ delayed(graphical_lasso_path)(
1008
+ X[train],
1009
+ alphas=alphas,
1010
+ X_test=X[test],
1011
+ mode=self.mode,
1012
+ tol=self.tol,
1013
+ enet_tol=self.enet_tol,
1014
+ max_iter=int(0.1 * self.max_iter),
1015
+ verbose=inner_verbose,
1016
+ eps=self.eps,
1017
+ )
1018
+ for train, test in cv.split(X, y)
1019
+ )
1020
+
1021
+ # Little danse to transform the list in what we need
1022
+ covs, _, scores = zip(*this_path)
1023
+ covs = zip(*covs)
1024
+ scores = zip(*scores)
1025
+ path.extend(zip(alphas, scores, covs))
1026
+ path = sorted(path, key=operator.itemgetter(0), reverse=True)
1027
+
1028
+ # Find the maximum (avoid using built in 'max' function to
1029
+ # have a fully-reproducible selection of the smallest alpha
1030
+ # in case of equality)
1031
+ best_score = -np.inf
1032
+ last_finite_idx = 0
1033
+ for index, (alpha, scores, _) in enumerate(path):
1034
+ this_score = np.mean(scores)
1035
+ if this_score >= 0.1 / np.finfo(np.float64).eps:
1036
+ this_score = np.nan
1037
+ if np.isfinite(this_score):
1038
+ last_finite_idx = index
1039
+ if this_score >= best_score:
1040
+ best_score = this_score
1041
+ best_index = index
1042
+
1043
+ # Refine the grid
1044
+ if best_index == 0:
1045
+ # We do not need to go back: we have chosen
1046
+ # the highest value of alpha for which there are
1047
+ # non-zero coefficients
1048
+ alpha_1 = path[0][0]
1049
+ alpha_0 = path[1][0]
1050
+ elif best_index == last_finite_idx and not best_index == len(path) - 1:
1051
+ # We have non-converged models on the upper bound of the
1052
+ # grid, we need to refine the grid there
1053
+ alpha_1 = path[best_index][0]
1054
+ alpha_0 = path[best_index + 1][0]
1055
+ elif best_index == len(path) - 1:
1056
+ alpha_1 = path[best_index][0]
1057
+ alpha_0 = 0.01 * path[best_index][0]
1058
+ else:
1059
+ alpha_1 = path[best_index - 1][0]
1060
+ alpha_0 = path[best_index + 1][0]
1061
+
1062
+ if not _is_arraylike_not_scalar(n_alphas):
1063
+ alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2)
1064
+ alphas = alphas[1:-1]
1065
+
1066
+ if self.verbose and n_refinements > 1:
1067
+ print(
1068
+ "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is"
1069
+ % (i + 1, n_refinements, time.time() - t0)
1070
+ )
1071
+
1072
+ path = list(zip(*path))
1073
+ grid_scores = list(path[1])
1074
+ alphas = list(path[0])
1075
+ # Finally, compute the score with alpha = 0
1076
+ alphas.append(0)
1077
+ grid_scores.append(
1078
+ cross_val_score(
1079
+ EmpiricalCovariance(),
1080
+ X,
1081
+ cv=cv,
1082
+ n_jobs=self.n_jobs,
1083
+ verbose=inner_verbose,
1084
+ )
1085
+ )
1086
+ grid_scores = np.array(grid_scores)
1087
+
1088
+ self.cv_results_ = {"alphas": np.array(alphas)}
1089
+
1090
+ for i in range(grid_scores.shape[1]):
1091
+ self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i]
1092
+
1093
+ self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1)
1094
+ self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1)
1095
+
1096
+ best_alpha = alphas[best_index]
1097
+ self.alpha_ = best_alpha
1098
+
1099
+ # Finally fit the model with the selected alpha
1100
+ self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
1101
+ emp_cov,
1102
+ alpha=best_alpha,
1103
+ mode=self.mode,
1104
+ tol=self.tol,
1105
+ enet_tol=self.enet_tol,
1106
+ max_iter=self.max_iter,
1107
+ verbose=inner_verbose,
1108
+ eps=self.eps,
1109
+ )
1110
+ return self
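As a quick orientation before the next file, here is a minimal usage sketch for the `GraphicalLassoCV.fit` path above (an editorial illustration, not part of the uploaded sources; the toy covariance, the sample size and `cv=5` are assumptions). It shows where the refined alpha grid, the per-split scores and the final refit end up:

import numpy as np
from sklearn.covariance import GraphicalLassoCV

# Toy 4-feature problem with a known sparse covariance (illustrative values).
true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
                     [0.0, 0.4, 0.0, 0.0],
                     [0.2, 0.0, 0.3, 0.1],
                     [0.0, 0.0, 0.1, 0.7]])
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=np.zeros(4), cov=true_cov, size=200)

model = GraphicalLassoCV(alphas=4, n_refinements=4, cv=5).fit(X)
print(model.alpha_)                          # penalty chosen on the refined grid
print(model.cv_results_["alphas"])           # every alpha tried, plus alpha=0
print(model.cv_results_["mean_test_score"])  # cross-validated log-likelihoods
print(model.covariance_)                     # covariance refit at alpha_ on all the data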
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py ADDED
@@ -0,0 +1,868 @@
 
1
+ """
2
+ Robust location and covariance estimators.
3
+
4
+ Here are implemented estimators that are resistant to outliers.
5
+
6
+ """
7
+ # Author: Virgile Fritsch <[email protected]>
8
+ #
9
+ # License: BSD 3 clause
10
+
11
+ import warnings
12
+ from numbers import Integral, Real
13
+
14
+ import numpy as np
15
+ from scipy import linalg
16
+ from scipy.stats import chi2
17
+
18
+ from ..base import _fit_context
19
+ from ..utils import check_array, check_random_state
20
+ from ..utils._param_validation import Interval
21
+ from ..utils.extmath import fast_logdet
22
+ from ._empirical_covariance import EmpiricalCovariance, empirical_covariance
23
+
24
+
25
+ # Minimum Covariance Determinant
26
+ # Implementation of an algorithm by Rousseeuw & Van Driessen described in
27
+ # (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
28
+ # 1999, American Statistical Association and the American Society
29
+ # for Quality, TECHNOMETRICS)
30
+ # XXX Is this really a public function? It's not listed in the docs or
31
+ # exported by sklearn.covariance. Deprecate?
32
+ def c_step(
33
+ X,
34
+ n_support,
35
+ remaining_iterations=30,
36
+ initial_estimates=None,
37
+ verbose=False,
38
+ cov_computation_method=empirical_covariance,
39
+ random_state=None,
40
+ ):
41
+ """C_step procedure described in [Rouseeuw1999]_ for computing the MCD.
42
+
43
+ Parameters
44
+ ----------
45
+ X : array-like of shape (n_samples, n_features)
46
+ Data set in which we look for the n_support observations whose
47
+ scatter matrix has minimum determinant.
48
+
49
+ n_support : int
50
+ Number of observations to compute the robust estimates of location
51
+ and covariance from. This parameter must be greater than
52
+ `n_samples / 2`.
53
+
54
+ remaining_iterations : int, default=30
55
+ Number of iterations to perform.
56
+ According to [Rouseeuw1999]_, two iterations are sufficient to get
57
+ close to the minimum, and we never need more than 30 to reach
58
+ convergence.
59
+
60
+ initial_estimates : tuple of shape (2,), default=None
61
+ Initial estimates of location and shape from which to run the c_step
62
+ procedure:
63
+ - initial_estimates[0]: an initial location estimate
64
+ - initial_estimates[1]: an initial covariance estimate
65
+
66
+ verbose : bool, default=False
67
+ Verbose mode.
68
+
69
+ cov_computation_method : callable, \
70
+ default=:func:`sklearn.covariance.empirical_covariance`
71
+ The function which will be used to compute the covariance.
72
+ Must return array of shape (n_features, n_features).
73
+
74
+ random_state : int, RandomState instance or None, default=None
75
+ Determines the pseudo random number generator for shuffling the data.
76
+ Pass an int for reproducible results across multiple function calls.
77
+ See :term:`Glossary <random_state>`.
78
+
79
+ Returns
80
+ -------
81
+ location : ndarray of shape (n_features,)
82
+ Robust location estimates.
83
+
84
+ covariance : ndarray of shape (n_features, n_features)
85
+ Robust covariance estimates.
86
+
87
+ support : ndarray of shape (n_samples,)
88
+ A mask for the `n_support` observations whose scatter matrix has
89
+ minimum determinant.
90
+
91
+ References
92
+ ----------
93
+ .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
94
+ Estimator, 1999, American Statistical Association and the American
95
+ Society for Quality, TECHNOMETRICS
96
+ """
97
+ X = np.asarray(X)
98
+ random_state = check_random_state(random_state)
99
+ return _c_step(
100
+ X,
101
+ n_support,
102
+ remaining_iterations=remaining_iterations,
103
+ initial_estimates=initial_estimates,
104
+ verbose=verbose,
105
+ cov_computation_method=cov_computation_method,
106
+ random_state=random_state,
107
+ )
108
+
109
+
110
+ def _c_step(
111
+ X,
112
+ n_support,
113
+ random_state,
114
+ remaining_iterations=30,
115
+ initial_estimates=None,
116
+ verbose=False,
117
+ cov_computation_method=empirical_covariance,
118
+ ):
119
+ n_samples, n_features = X.shape
120
+ dist = np.inf
121
+
122
+ # Initialisation
123
+ support = np.zeros(n_samples, dtype=bool)
124
+ if initial_estimates is None:
125
+ # compute initial robust estimates from a random subset
126
+ support[random_state.permutation(n_samples)[:n_support]] = True
127
+ else:
128
+ # get initial robust estimates from the function parameters
129
+ location = initial_estimates[0]
130
+ covariance = initial_estimates[1]
131
+ # run a special iteration for that case (to get an initial support)
132
+ precision = linalg.pinvh(covariance)
133
+ X_centered = X - location
134
+ dist = (np.dot(X_centered, precision) * X_centered).sum(1)
135
+ # compute new estimates
136
+ support[np.argsort(dist)[:n_support]] = True
137
+
138
+ X_support = X[support]
139
+ location = X_support.mean(0)
140
+ covariance = cov_computation_method(X_support)
141
+
142
+ # Iterative procedure for Minimum Covariance Determinant computation
143
+ det = fast_logdet(covariance)
144
+ # If the data already has singular covariance, calculate the precision,
145
+ # as the loop below will not be entered.
146
+ if np.isinf(det):
147
+ precision = linalg.pinvh(covariance)
148
+
149
+ previous_det = np.inf
150
+ while det < previous_det and remaining_iterations > 0 and not np.isinf(det):
151
+ # save old estimates values
152
+ previous_location = location
153
+ previous_covariance = covariance
154
+ previous_det = det
155
+ previous_support = support
156
+ # compute a new support from the full data set mahalanobis distances
157
+ precision = linalg.pinvh(covariance)
158
+ X_centered = X - location
159
+ dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
160
+ # compute new estimates
161
+ support = np.zeros(n_samples, dtype=bool)
162
+ support[np.argsort(dist)[:n_support]] = True
163
+ X_support = X[support]
164
+ location = X_support.mean(axis=0)
165
+ covariance = cov_computation_method(X_support)
166
+ det = fast_logdet(covariance)
167
+ # update remaining iterations for early stopping
168
+ remaining_iterations -= 1
169
+
170
+ previous_dist = dist
171
+ dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
172
+ # Check if best fit already found (det => 0, logdet => -inf)
173
+ if np.isinf(det):
174
+ results = location, covariance, det, support, dist
175
+ # Check convergence
176
+ if np.allclose(det, previous_det):
177
+ # c_step procedure converged
178
+ if verbose:
179
+ print(
180
+ "Optimal couple (location, covariance) found before"
181
+ " ending iterations (%d left)" % (remaining_iterations)
182
+ )
183
+ results = location, covariance, det, support, dist
184
+ elif det > previous_det:
185
+ # determinant has increased (should not happen)
186
+ warnings.warn(
187
+ "Determinant has increased; this should not happen: "
188
+ "log(det) > log(previous_det) (%.15f > %.15f). "
189
+ "You may want to try with a higher value of "
190
+ "support_fraction (current value: %.3f)."
191
+ % (det, previous_det, n_support / n_samples),
192
+ RuntimeWarning,
193
+ )
194
+ results = (
195
+ previous_location,
196
+ previous_covariance,
197
+ previous_det,
198
+ previous_support,
199
+ previous_dist,
200
+ )
201
+
202
+ # Check early stopping
203
+ if remaining_iterations == 0:
204
+ if verbose:
205
+ print("Maximum number of iterations reached")
206
+ results = location, covariance, det, support, dist
207
+
208
+ return results
209
+
210
+
211
+ def select_candidates(
212
+ X,
213
+ n_support,
214
+ n_trials,
215
+ select=1,
216
+ n_iter=30,
217
+ verbose=False,
218
+ cov_computation_method=empirical_covariance,
219
+ random_state=None,
220
+ ):
221
+ """Finds the best pure subset of observations to compute MCD from it.
222
+
223
+ The purpose of this function is to find the best sets of n_support
224
+ observations with respect to a minimization of their covariance
225
+ matrix determinant. Equivalently, it removes n_samples-n_support
226
+ observations to construct what we call a pure data set (i.e. not
227
+ containing outliers). The list of the observations of the pure
228
+ data set is referred to as the `support`.
229
+
230
+ Starting from a random support, the pure data set is found by the
231
+ c_step procedure introduced by Rousseeuw and Van Driessen in
232
+ [RV]_.
233
+
234
+ Parameters
235
+ ----------
236
+ X : array-like of shape (n_samples, n_features)
237
+ Data (sub)set in which we look for the n_support purest observations.
238
+
239
+ n_support : int
240
+ The number of samples the pure data set must contain.
241
+ This parameter must be in the range `[(n + p + 1)/2] < n_support < n`.
242
+
243
+ n_trials : int or tuple of shape (2,)
244
+ Number of different initial sets of observations from which to
245
+ run the algorithm. This parameter should be a strictly positive
246
+ integer.
247
+ Instead of giving a number of trials to perform, one can provide a
248
+ list of initial estimates that will be used to iteratively run
249
+ c_step procedures. In this case:
250
+ - n_trials[0]: array-like, shape (n_trials, n_features)
251
+ is the list of `n_trials` initial location estimates
252
+ - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
253
+ is the list of `n_trials` initial covariances estimates
254
+
255
+ select : int, default=1
256
+ Number of best candidates results to return. This parameter must be
257
+ a strictly positive integer.
258
+
259
+ n_iter : int, default=30
260
+ Maximum number of iterations for the c_step procedure.
261
+ (2 is enough to be close to the final solution. "Never" exceeds 20).
262
+ This parameter must be a strictly positive integer.
263
+
264
+ verbose : bool, default=False
265
+ Control the output verbosity.
266
+
267
+ cov_computation_method : callable, \
268
+ default=:func:`sklearn.covariance.empirical_covariance`
269
+ The function which will be used to compute the covariance.
270
+ Must return an array of shape (n_features, n_features).
271
+
272
+ random_state : int, RandomState instance or None, default=None
273
+ Determines the pseudo random number generator for shuffling the data.
274
+ Pass an int for reproducible results across multiple function calls.
275
+ See :term:`Glossary <random_state>`.
276
+
277
+ See Also
278
+ ---------
279
+ c_step
280
+
281
+ Returns
282
+ -------
283
+ best_locations : ndarray of shape (select, n_features)
284
+ The `select` location estimates computed from the `select` best
285
+ supports found in the data set (`X`).
286
+
287
+ best_covariances : ndarray of shape (select, n_features, n_features)
288
+ The `select` covariance estimates computed from the `select`
289
+ best supports found in the data set (`X`).
290
+
291
+ best_supports : ndarray of shape (select, n_samples)
292
+ The `select` best supports found in the data set (`X`).
293
+
294
+ References
295
+ ----------
296
+ .. [RV] A Fast Algorithm for the Minimum Covariance Determinant
297
+ Estimator, 1999, American Statistical Association and the American
298
+ Society for Quality, TECHNOMETRICS
299
+ """
300
+ random_state = check_random_state(random_state)
301
+
302
+ if isinstance(n_trials, Integral):
303
+ run_from_estimates = False
304
+ elif isinstance(n_trials, tuple):
305
+ run_from_estimates = True
306
+ estimates_list = n_trials
307
+ n_trials = estimates_list[0].shape[0]
308
+ else:
309
+ raise TypeError(
310
+ "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)"
311
+ % (n_trials, type(n_trials))
312
+ )
313
+
314
+ # compute `n_trials` location and shape estimates candidates in the subset
315
+ all_estimates = []
316
+ if not run_from_estimates:
317
+ # perform `n_trials` computations from random initial supports
318
+ for j in range(n_trials):
319
+ all_estimates.append(
320
+ _c_step(
321
+ X,
322
+ n_support,
323
+ remaining_iterations=n_iter,
324
+ verbose=verbose,
325
+ cov_computation_method=cov_computation_method,
326
+ random_state=random_state,
327
+ )
328
+ )
329
+ else:
330
+ # perform computations from every given initial estimates
331
+ for j in range(n_trials):
332
+ initial_estimates = (estimates_list[0][j], estimates_list[1][j])
333
+ all_estimates.append(
334
+ _c_step(
335
+ X,
336
+ n_support,
337
+ remaining_iterations=n_iter,
338
+ initial_estimates=initial_estimates,
339
+ verbose=verbose,
340
+ cov_computation_method=cov_computation_method,
341
+ random_state=random_state,
342
+ )
343
+ )
344
+ all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip(
345
+ *all_estimates
346
+ )
347
+ # find the `n_best` best results among the `n_trials` ones
348
+ index_best = np.argsort(all_dets_sub)[:select]
349
+ best_locations = np.asarray(all_locs_sub)[index_best]
350
+ best_covariances = np.asarray(all_covs_sub)[index_best]
351
+ best_supports = np.asarray(all_supports_sub)[index_best]
352
+ best_ds = np.asarray(all_ds_sub)[index_best]
353
+
354
+ return best_locations, best_covariances, best_supports, best_ds
355
+
356
+
357
+ def fast_mcd(
358
+ X,
359
+ support_fraction=None,
360
+ cov_computation_method=empirical_covariance,
361
+ random_state=None,
362
+ ):
363
+ """Estimate the Minimum Covariance Determinant matrix.
364
+
365
+ Read more in the :ref:`User Guide <robust_covariance>`.
366
+
367
+ Parameters
368
+ ----------
369
+ X : array-like of shape (n_samples, n_features)
370
+ The data matrix, with p features and n samples.
371
+
372
+ support_fraction : float, default=None
373
+ The proportion of points to be included in the support of the raw
374
+ MCD estimate. Default is `None`, which implies that the minimum
375
+ value of `support_fraction` will be used within the algorithm:
376
+ `(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must be
377
+ in the range (0, 1).
378
+
379
+ cov_computation_method : callable, \
380
+ default=:func:`sklearn.covariance.empirical_covariance`
381
+ The function which will be used to compute the covariance.
382
+ Must return an array of shape (n_features, n_features).
383
+
384
+ random_state : int, RandomState instance or None, default=None
385
+ Determines the pseudo random number generator for shuffling the data.
386
+ Pass an int for reproducible results across multiple function calls.
387
+ See :term:`Glossary <random_state>`.
388
+
389
+ Returns
390
+ -------
391
+ location : ndarray of shape (n_features,)
392
+ Robust location of the data.
393
+
394
+ covariance : ndarray of shape (n_features, n_features)
395
+ Robust covariance of the features.
396
+
397
+ support : ndarray of shape (n_samples,), dtype=bool
398
+ A mask of the observations that have been used to compute
399
+ the robust location and covariance estimates of the data set.
400
+
401
+ Notes
402
+ -----
403
+ The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
404
+ in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
405
+ 1999, American Statistical Association and the American Society
406
+ for Quality, TECHNOMETRICS".
407
+ The principle is to compute robust estimates on random subsets before
408
+ pooling them into a larger subset, and finally into the full data set.
409
+ Depending on the size of the initial sample, we have one, two or three
410
+ such computation levels.
411
+
412
+ Note that only raw estimates are returned. If one is interested in
413
+ the correction and reweighting steps described in [RouseeuwVan]_,
414
+ see the MinCovDet object.
415
+
416
+ References
417
+ ----------
418
+
419
+ .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
420
+ Determinant Estimator, 1999, American Statistical Association
421
+ and the American Society for Quality, TECHNOMETRICS
422
+
423
+ .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
424
+ Asymptotics For The Minimum Covariance Determinant Estimator,
425
+ The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
426
+ """
427
+ random_state = check_random_state(random_state)
428
+
429
+ X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
430
+ n_samples, n_features = X.shape
431
+
432
+ # minimum breakdown value
433
+ if support_fraction is None:
434
+ n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
435
+ else:
436
+ n_support = int(support_fraction * n_samples)
437
+
438
+ # 1-dimensional case quick computation
439
+ # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
440
+ # Regression and Outlier Detection, John Wiley & Sons, chapter 4)
441
+ if n_features == 1:
442
+ if n_support < n_samples:
443
+ # find the sample shortest halves
444
+ X_sorted = np.sort(np.ravel(X))
445
+ diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
446
+ halves_start = np.where(diff == np.min(diff))[0]
447
+ # take the middle points' mean to get the robust location estimate
448
+ location = (
449
+ 0.5
450
+ * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
451
+ )
452
+ support = np.zeros(n_samples, dtype=bool)
453
+ X_centered = X - location
454
+ support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
455
+ covariance = np.asarray([[np.var(X[support])]])
456
+ location = np.array([location])
457
+ # get precision matrix in an optimized way
458
+ precision = linalg.pinvh(covariance)
459
+ dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
460
+ else:
461
+ support = np.ones(n_samples, dtype=bool)
462
+ covariance = np.asarray([[np.var(X)]])
463
+ location = np.asarray([np.mean(X)])
464
+ X_centered = X - location
465
+ # get precision matrix in an optimized way
466
+ precision = linalg.pinvh(covariance)
467
+ dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
468
+ # Starting FastMCD algorithm for p-dimensional case
469
+ if (n_samples > 500) and (n_features > 1):
470
+ # 1. Find candidate supports on subsets
471
+ # a. split the set in subsets of size ~ 300
472
+ n_subsets = n_samples // 300
473
+ n_samples_subsets = n_samples // n_subsets
474
+ samples_shuffle = random_state.permutation(n_samples)
475
+ h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
476
+ # b. perform a total of 500 trials
477
+ n_trials_tot = 500
478
+ # c. select 10 best (location, covariance) for each subset
479
+ n_best_sub = 10
480
+ n_trials = max(10, n_trials_tot // n_subsets)
481
+ n_best_tot = n_subsets * n_best_sub
482
+ all_best_locations = np.zeros((n_best_tot, n_features))
483
+ try:
484
+ all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
485
+ except MemoryError:
486
+ # The above is too big. Let's try with something much smaller
487
+ # (and less optimal)
488
+ n_best_tot = 10
489
+ all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
490
+ n_best_sub = 2
491
+ for i in range(n_subsets):
492
+ low_bound = i * n_samples_subsets
493
+ high_bound = low_bound + n_samples_subsets
494
+ current_subset = X[samples_shuffle[low_bound:high_bound]]
495
+ best_locations_sub, best_covariances_sub, _, _ = select_candidates(
496
+ current_subset,
497
+ h_subset,
498
+ n_trials,
499
+ select=n_best_sub,
500
+ n_iter=2,
501
+ cov_computation_method=cov_computation_method,
502
+ random_state=random_state,
503
+ )
504
+ subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
505
+ all_best_locations[subset_slice] = best_locations_sub
506
+ all_best_covariances[subset_slice] = best_covariances_sub
507
+ # 2. Pool the candidate supports into a merged set
508
+ # (possibly the full dataset)
509
+ n_samples_merged = min(1500, n_samples)
510
+ h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
511
+ if n_samples > 1500:
512
+ n_best_merged = 10
513
+ else:
514
+ n_best_merged = 1
515
+ # find the best couples (location, covariance) on the merged set
516
+ selection = random_state.permutation(n_samples)[:n_samples_merged]
517
+ locations_merged, covariances_merged, supports_merged, d = select_candidates(
518
+ X[selection],
519
+ h_merged,
520
+ n_trials=(all_best_locations, all_best_covariances),
521
+ select=n_best_merged,
522
+ cov_computation_method=cov_computation_method,
523
+ random_state=random_state,
524
+ )
525
+ # 3. Finally get the overall best (locations, covariance) couple
526
+ if n_samples < 1500:
527
+ # directly get the best couple (location, covariance)
528
+ location = locations_merged[0]
529
+ covariance = covariances_merged[0]
530
+ support = np.zeros(n_samples, dtype=bool)
531
+ dist = np.zeros(n_samples)
532
+ support[selection] = supports_merged[0]
533
+ dist[selection] = d[0]
534
+ else:
535
+ # select the best couple on the full dataset
536
+ locations_full, covariances_full, supports_full, d = select_candidates(
537
+ X,
538
+ n_support,
539
+ n_trials=(locations_merged, covariances_merged),
540
+ select=1,
541
+ cov_computation_method=cov_computation_method,
542
+ random_state=random_state,
543
+ )
544
+ location = locations_full[0]
545
+ covariance = covariances_full[0]
546
+ support = supports_full[0]
547
+ dist = d[0]
548
+ elif n_features > 1:
549
+ # 1. Find the 10 best couples (location, covariance)
550
+ # considering two iterations
551
+ n_trials = 30
552
+ n_best = 10
553
+ locations_best, covariances_best, _, _ = select_candidates(
554
+ X,
555
+ n_support,
556
+ n_trials=n_trials,
557
+ select=n_best,
558
+ n_iter=2,
559
+ cov_computation_method=cov_computation_method,
560
+ random_state=random_state,
561
+ )
562
+ # 2. Select the best couple on the full dataset amongst the 10
563
+ locations_full, covariances_full, supports_full, d = select_candidates(
564
+ X,
565
+ n_support,
566
+ n_trials=(locations_best, covariances_best),
567
+ select=1,
568
+ cov_computation_method=cov_computation_method,
569
+ random_state=random_state,
570
+ )
571
+ location = locations_full[0]
572
+ covariance = covariances_full[0]
573
+ support = supports_full[0]
574
+ dist = d[0]
575
+
576
+ return location, covariance, support, dist
577
+
578
+
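Before the `MinCovDet` class, a minimal sketch of calling `fast_mcd` directly (an editorial illustration, not part of the uploaded file; the synthetic data and the planted outliers are assumptions). It surfaces the four raw outputs documented above:

import numpy as np
from sklearn.covariance import fast_mcd

rng = np.random.RandomState(42)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=300)
X[:15] += 8.0  # plant a few gross outliers

location, covariance, support, dist = fast_mcd(X, random_state=0)
print(location)       # robust location, barely moved by the outliers
print(support.sum())  # how many observations ended up in the support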
579
+ class MinCovDet(EmpiricalCovariance):
580
+ """Minimum Covariance Determinant (MCD): robust estimator of covariance.
581
+
582
+ The Minimum Covariance Determinant covariance estimator is to be applied
583
+ on Gaussian-distributed data, but could still be relevant on data
584
+ drawn from a unimodal, symmetric distribution. It is not meant to be used
585
+ with multi-modal data (the algorithm used to fit a MinCovDet object is
586
+ likely to fail in such a case).
587
+ One should consider projection pursuit methods to deal with multi-modal
588
+ datasets.
589
+
590
+ Read more in the :ref:`User Guide <robust_covariance>`.
591
+
592
+ Parameters
593
+ ----------
594
+ store_precision : bool, default=True
595
+ Specify if the estimated precision is stored.
596
+
597
+ assume_centered : bool, default=False
598
+ If True, the support of the robust location and the covariance
599
+ estimates is computed, and a covariance estimate is recomputed from
600
+ it, without centering the data.
601
+ Useful to work with data whose mean is approximately equal to
602
+ zero but is not exactly zero.
603
+ If False, the robust location and covariance are directly computed
604
+ with the FastMCD algorithm without additional treatment.
605
+
606
+ support_fraction : float, default=None
607
+ The proportion of points to be included in the support of the raw
608
+ MCD estimate. Default is None, which implies that the minimum
609
+ value of support_fraction will be used within the algorithm:
610
+ `(n_samples + n_features + 1) / (2 * n_samples)`. The parameter must be
611
+ in the range (0, 1].
612
+
613
+ random_state : int, RandomState instance or None, default=None
614
+ Determines the pseudo random number generator for shuffling the data.
615
+ Pass an int for reproducible results across multiple function calls.
616
+ See :term:`Glossary <random_state>`.
617
+
618
+ Attributes
619
+ ----------
620
+ raw_location_ : ndarray of shape (n_features,)
621
+ The raw robust estimated location before correction and re-weighting.
622
+
623
+ raw_covariance_ : ndarray of shape (n_features, n_features)
624
+ The raw robust estimated covariance before correction and re-weighting.
625
+
626
+ raw_support_ : ndarray of shape (n_samples,)
627
+ A mask of the observations that have been used to compute
628
+ the raw robust estimates of location and shape, before correction
629
+ and re-weighting.
630
+
631
+ location_ : ndarray of shape (n_features,)
632
+ Estimated robust location.
633
+
634
+ covariance_ : ndarray of shape (n_features, n_features)
635
+ Estimated robust covariance matrix.
636
+
637
+ precision_ : ndarray of shape (n_features, n_features)
638
+ Estimated pseudo inverse matrix.
639
+ (stored only if store_precision is True)
640
+
641
+ support_ : ndarray of shape (n_samples,)
642
+ A mask of the observations that have been used to compute
643
+ the robust estimates of location and shape.
644
+
645
+ dist_ : ndarray of shape (n_samples,)
646
+ Mahalanobis distances of the training set (on which :meth:`fit` is
647
+ called) observations.
648
+
649
+ n_features_in_ : int
650
+ Number of features seen during :term:`fit`.
651
+
652
+ .. versionadded:: 0.24
653
+
654
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
655
+ Names of features seen during :term:`fit`. Defined only when `X`
656
+ has feature names that are all strings.
657
+
658
+ .. versionadded:: 1.0
659
+
660
+ See Also
661
+ --------
662
+ EllipticEnvelope : An object for detecting outliers in
663
+ a Gaussian distributed dataset.
664
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
665
+ GraphicalLasso : Sparse inverse covariance estimation
666
+ with an l1-penalized estimator.
667
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
668
+ choice of the l1 penalty.
669
+ LedoitWolf : LedoitWolf Estimator.
670
+ OAS : Oracle Approximating Shrinkage Estimator.
671
+ ShrunkCovariance : Covariance estimator with shrinkage.
672
+
673
+ References
674
+ ----------
675
+
676
+ .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression.
677
+ J. Am Stat Ass, 79:871, 1984.
678
+ .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant
679
+ Estimator, 1999, American Statistical Association and the American
680
+ Society for Quality, TECHNOMETRICS
681
+ .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun,
682
+ Asymptotics For The Minimum Covariance Determinant Estimator,
683
+ The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
684
+
685
+ Examples
686
+ --------
687
+ >>> import numpy as np
688
+ >>> from sklearn.covariance import MinCovDet
689
+ >>> from sklearn.datasets import make_gaussian_quantiles
690
+ >>> real_cov = np.array([[.8, .3],
691
+ ... [.3, .4]])
692
+ >>> rng = np.random.RandomState(0)
693
+ >>> X = rng.multivariate_normal(mean=[0, 0],
694
+ ... cov=real_cov,
695
+ ... size=500)
696
+ >>> cov = MinCovDet(random_state=0).fit(X)
697
+ >>> cov.covariance_
698
+ array([[0.7411..., 0.2535...],
699
+ [0.2535..., 0.3053...]])
700
+ >>> cov.location_
701
+ array([0.0813... , 0.0427...])
702
+ """
703
+
704
+ _parameter_constraints: dict = {
705
+ **EmpiricalCovariance._parameter_constraints,
706
+ "support_fraction": [Interval(Real, 0, 1, closed="right"), None],
707
+ "random_state": ["random_state"],
708
+ }
709
+ _nonrobust_covariance = staticmethod(empirical_covariance)
710
+
711
+ def __init__(
712
+ self,
713
+ *,
714
+ store_precision=True,
715
+ assume_centered=False,
716
+ support_fraction=None,
717
+ random_state=None,
718
+ ):
719
+ self.store_precision = store_precision
720
+ self.assume_centered = assume_centered
721
+ self.support_fraction = support_fraction
722
+ self.random_state = random_state
723
+
724
+ @_fit_context(prefer_skip_nested_validation=True)
725
+ def fit(self, X, y=None):
726
+ """Fit a Minimum Covariance Determinant with the FastMCD algorithm.
727
+
728
+ Parameters
729
+ ----------
730
+ X : array-like of shape (n_samples, n_features)
731
+ Training data, where `n_samples` is the number of samples
732
+ and `n_features` is the number of features.
733
+
734
+ y : Ignored
735
+ Not used, present for API consistency by convention.
736
+
737
+ Returns
738
+ -------
739
+ self : object
740
+ Returns the instance itself.
741
+ """
742
+ X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet")
743
+ random_state = check_random_state(self.random_state)
744
+ n_samples, n_features = X.shape
745
+ # check that the empirical covariance is full rank
746
+ if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
747
+ warnings.warn(
748
+ "The covariance matrix associated to your dataset is not full rank"
749
+ )
750
+ # compute and store raw estimates
751
+ raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
752
+ X,
753
+ support_fraction=self.support_fraction,
754
+ cov_computation_method=self._nonrobust_covariance,
755
+ random_state=random_state,
756
+ )
757
+ if self.assume_centered:
758
+ raw_location = np.zeros(n_features)
759
+ raw_covariance = self._nonrobust_covariance(
760
+ X[raw_support], assume_centered=True
761
+ )
762
+ # get precision matrix in an optimized way
763
+ precision = linalg.pinvh(raw_covariance)
764
+ raw_dist = np.sum(np.dot(X, precision) * X, 1)
765
+ self.raw_location_ = raw_location
766
+ self.raw_covariance_ = raw_covariance
767
+ self.raw_support_ = raw_support
768
+ self.location_ = raw_location
769
+ self.support_ = raw_support
770
+ self.dist_ = raw_dist
771
+ # obtain consistency at normal models
772
+ self.correct_covariance(X)
773
+ # re-weight estimator
774
+ self.reweight_covariance(X)
775
+
776
+ return self
777
+
778
+ def correct_covariance(self, data):
779
+ """Apply a correction to raw Minimum Covariance Determinant estimates.
780
+
781
+ Correction using the empirical correction factor suggested
782
+ by Rousseeuw and Van Driessen in [RVD]_.
783
+
784
+ Parameters
785
+ ----------
786
+ data : array-like of shape (n_samples, n_features)
787
+ The data matrix, with p features and n samples.
788
+ The data set must be the one which was used to compute
789
+ the raw estimates.
790
+
791
+ Returns
792
+ -------
793
+ covariance_corrected : ndarray of shape (n_features, n_features)
794
+ Corrected robust covariance estimate.
795
+
796
+ References
797
+ ----------
798
+
799
+ .. [RVD] A Fast Algorithm for the Minimum Covariance
800
+ Determinant Estimator, 1999, American Statistical Association
801
+ and the American Society for Quality, TECHNOMETRICS
802
+ """
803
+
804
+ # Check that the covariance of the support data is not equal to 0.
805
+ # Otherwise self.dist_ = 0 and thus correction = 0.
806
+ n_samples = len(self.dist_)
807
+ n_support = np.sum(self.support_)
808
+ if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
809
+ raise ValueError(
810
+ "The covariance matrix of the support data "
811
+ "is equal to 0, try to increase support_fraction"
812
+ )
813
+ correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
814
+ covariance_corrected = self.raw_covariance_ * correction
815
+ self.dist_ /= correction
816
+ return covariance_corrected
817
+
818
+ def reweight_covariance(self, data):
819
+ """Re-weight raw Minimum Covariance Determinant estimates.
820
+
821
+ Re-weight observations using Rousseeuw's method (equivalent to
822
+ deleting outlying observations from the data set before
823
+ computing location and covariance estimates) described
824
+ in [RVDriessen]_.
825
+
826
+ Parameters
827
+ ----------
828
+ data : array-like of shape (n_samples, n_features)
829
+ The data matrix, with p features and n samples.
830
+ The data set must be the one which was used to compute
831
+ the raw estimates.
832
+
833
+ Returns
834
+ -------
835
+ location_reweighted : ndarray of shape (n_features,)
836
+ Re-weighted robust location estimate.
837
+
838
+ covariance_reweighted : ndarray of shape (n_features, n_features)
839
+ Re-weighted robust covariance estimate.
840
+
841
+ support_reweighted : ndarray of shape (n_samples,), dtype=bool
842
+ A mask of the observations that have been used to compute
843
+ the re-weighted robust location and covariance estimates.
844
+
845
+ References
846
+ ----------
847
+
848
+ .. [RVDriessen] A Fast Algorithm for the Minimum Covariance
849
+ Determinant Estimator, 1999, American Statistical Association
850
+ and the American Society for Quality, TECHNOMETRICS
851
+ """
852
+ n_samples, n_features = data.shape
853
+ mask = self.dist_ < chi2(n_features).isf(0.025)
854
+ if self.assume_centered:
855
+ location_reweighted = np.zeros(n_features)
856
+ else:
857
+ location_reweighted = data[mask].mean(0)
858
+ covariance_reweighted = self._nonrobust_covariance(
859
+ data[mask], assume_centered=self.assume_centered
860
+ )
861
+ support_reweighted = np.zeros(n_samples, dtype=bool)
862
+ support_reweighted[mask] = True
863
+ self._set_covariance(covariance_reweighted)
864
+ self.location_ = location_reweighted
865
+ self.support_ = support_reweighted
866
+ X_centered = data - self.location_
867
+ self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1)
868
+ return location_reweighted, covariance_reweighted, support_reweighted
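To close out this file, a short sketch contrasting the reweighted MCD estimates produced by `fit` above with the plain maximum-likelihood estimate (an editorial illustration; the contamination scheme and sizes are assumptions, and the printed values are indicative only):

import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)
X[:25] = rng.multivariate_normal(mean=[6, -6], cov=np.eye(2), size=25)  # contaminate 5%

mcd = MinCovDet(random_state=0).fit(X)  # raw fit, then correct_covariance + reweight_covariance
mle = EmpiricalCovariance().fit(X)

print(mcd.covariance_)     # stays close to the uncontaminated [[.8, .3], [.3, .4]]
print(mle.covariance_)     # inflated by the 25 outlying rows
print(mcd.support_.sum())  # observations kept after re-weighting
print(mcd.dist_[:5])       # squared Mahalanobis distances of the training rows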
env-llmeval/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py ADDED
@@ -0,0 +1,816 @@
 
1
+ """
2
+ Covariance estimators using shrinkage.
3
+
4
+ Shrinkage corresponds to regularising `cov` using a convex combination:
5
+ shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
6
+
7
+ """
8
+
9
+ # Author: Alexandre Gramfort <[email protected]>
10
+ # Gael Varoquaux <[email protected]>
11
+ # Virgile Fritsch <[email protected]>
12
+ #
13
+ # License: BSD 3 clause
14
+
15
+ # avoid division truncation
16
+ import warnings
17
+ from numbers import Integral, Real
18
+
19
+ import numpy as np
20
+
21
+ from ..base import _fit_context
22
+ from ..utils import check_array
23
+ from ..utils._param_validation import Interval, validate_params
24
+ from . import EmpiricalCovariance, empirical_covariance
25
+
26
+
27
+ def _ledoit_wolf(X, *, assume_centered, block_size):
28
+ """Estimate the shrunk Ledoit-Wolf covariance matrix."""
29
+ # for only one feature, the result is the same whatever the shrinkage
30
+ if len(X.shape) == 2 and X.shape[1] == 1:
31
+ if not assume_centered:
32
+ X = X - X.mean()
33
+ return np.atleast_2d((X**2).mean()), 0.0
34
+ n_features = X.shape[1]
35
+
36
+ # get Ledoit-Wolf shrinkage
37
+ shrinkage = ledoit_wolf_shrinkage(
38
+ X, assume_centered=assume_centered, block_size=block_size
39
+ )
40
+ emp_cov = empirical_covariance(X, assume_centered=assume_centered)
41
+ mu = np.sum(np.trace(emp_cov)) / n_features
42
+ shrunk_cov = (1.0 - shrinkage) * emp_cov
43
+ shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
44
+
45
+ return shrunk_cov, shrinkage
46
+
47
+
48
+ def _oas(X, *, assume_centered=False):
49
+ """Estimate covariance with the Oracle Approximating Shrinkage algorithm.
50
+
51
+ The formulation is based on [1]_.
52
+ [1] "Shrinkage algorithms for MMSE covariance estimation.",
53
+ Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
54
+ IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
55
+ https://arxiv.org/pdf/0907.4698.pdf
56
+ """
57
+ if len(X.shape) == 2 and X.shape[1] == 1:
58
+ # for only one feature, the result is the same whatever the shrinkage
59
+ if not assume_centered:
60
+ X = X - X.mean()
61
+ return np.atleast_2d((X**2).mean()), 0.0
62
+
63
+ n_samples, n_features = X.shape
64
+
65
+ emp_cov = empirical_covariance(X, assume_centered=assume_centered)
66
+
67
+ # The shrinkage is defined as:
68
+ # shrinkage = min(
69
+ # (trace(S @ S.T) + trace(S)**2) / ((n + 1) * (trace(S @ S.T) - trace(S)**2 / p)), 1
70
+ # )
71
+ # where n and p are n_samples and n_features, respectively (cf. Eq. 23 in [1]).
72
+ # The factor 2 / p is omitted since it does not impact the value of the estimator
73
+ # for large p.
74
+
75
+ # Instead of computing trace(S)**2, we can compute the average of the squared
76
+ # elements of S that is equal to trace(S)**2 / p**2.
77
+ # See the definition of the Frobenius norm:
78
+ # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm
79
+ alpha = np.mean(emp_cov**2)
80
+ mu = np.trace(emp_cov) / n_features
81
+ mu_squared = mu**2
82
+
83
+ # The factor 1 / p**2 will cancel out since it is in both the numerator and
84
+ # denominator
85
+ num = alpha + mu_squared
86
+ den = (n_samples + 1) * (alpha - mu_squared / n_features)
87
+ shrinkage = 1.0 if den == 0 else min(num / den, 1.0)
88
+
89
+ # The shrunk covariance is defined as:
90
+ # (1 - shrinkage) * S + shrinkage * F (cf. Eq. 4 in [1])
91
+ # where S is the empirical covariance and F is the shrinkage target defined as
92
+ # F = trace(S) / n_features * np.identity(n_features) (cf. Eq. 3 in [1])
93
+ shrunk_cov = (1.0 - shrinkage) * emp_cov
94
+ shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
95
+
96
+ return shrunk_cov, shrinkage
97
+
98
+
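A worked instance of the OAS shrinkage formula implemented just above may help; this is an editorial sketch with an assumed diagonal empirical covariance and sample count, mirroring the `_oas` arithmetic step by step (numbers rounded):

import numpy as np

emp_cov = np.diag([1.0, 3.0])   # assumed empirical covariance, p = 2
n_samples, p = 100, 2

alpha = np.mean(emp_cov**2)                  # 2.5
mu = np.trace(emp_cov) / p                   # 2.0
num = alpha + mu**2                          # 6.5
den = (n_samples + 1) * (alpha - mu**2 / p)  # 101 * 0.5 = 50.5
shrinkage = 1.0 if den == 0 else min(num / den, 1.0)  # ~0.1287

shrunk = (1.0 - shrinkage) * emp_cov
shrunk.flat[:: p + 1] += shrinkage * mu
print(shrunk)  # diagonal ~[1.129, 2.871]: both variances pulled toward mu = 2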
99
+ ###############################################################################
100
+ # Public API
101
+ # ShrunkCovariance estimator
102
+
103
+
104
+ @validate_params(
105
+ {
106
+ "emp_cov": ["array-like"],
107
+ "shrinkage": [Interval(Real, 0, 1, closed="both")],
108
+ },
109
+ prefer_skip_nested_validation=True,
110
+ )
111
+ def shrunk_covariance(emp_cov, shrinkage=0.1):
112
+ """Calculate covariance matrices shrunk on the diagonal.
113
+
114
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
115
+
116
+ Parameters
117
+ ----------
118
+ emp_cov : array-like of shape (..., n_features, n_features)
119
+ Covariance matrices to be shrunk, at least 2D ndarray.
120
+
121
+ shrinkage : float, default=0.1
122
+ Coefficient in the convex combination used for the computation
123
+ of the shrunk estimate. Range is [0, 1].
124
+
125
+ Returns
126
+ -------
127
+ shrunk_cov : ndarray of shape (..., n_features, n_features)
128
+ Shrunk covariance matrices.
129
+
130
+ Notes
131
+ -----
132
+ The regularized (shrunk) covariance is given by::
133
+
134
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
135
+
136
+ where `mu = trace(cov) / n_features`.
137
+
138
+ Examples
139
+ --------
140
+ >>> import numpy as np
141
+ >>> from sklearn.datasets import make_gaussian_quantiles
142
+ >>> from sklearn.covariance import empirical_covariance, shrunk_covariance
143
+ >>> real_cov = np.array([[.8, .3], [.3, .4]])
144
+ >>> rng = np.random.RandomState(0)
145
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
146
+ >>> shrunk_covariance(empirical_covariance(X))
147
+ array([[0.73..., 0.25...],
148
+ [0.25..., 0.41...]])
149
+ """
150
+ emp_cov = check_array(emp_cov, allow_nd=True)
151
+ n_features = emp_cov.shape[-1]
152
+
153
+ shrunk_cov = (1.0 - shrinkage) * emp_cov
154
+ mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features
155
+ mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim)))
156
+ shrunk_cov += shrinkage * mu * np.eye(n_features)
157
+
158
+ return shrunk_cov
159
+
160
+
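The convex combination in the Notes above reduces to simple arithmetic; a tiny editorial check (the 2x2 matrix and the shrinkage value are assumptions):

import numpy as np
from sklearn.covariance import shrunk_covariance

cov = np.array([[0.8, 0.3],
                [0.3, 0.4]])
# mu = trace(cov) / 2 = 0.6, so with shrinkage = 0.1 the result is
# 0.9 * cov + 0.1 * 0.6 * I = [[0.78, 0.27], [0.27, 0.42]]
print(shrunk_covariance(cov, shrinkage=0.1))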
161
+ class ShrunkCovariance(EmpiricalCovariance):
162
+ """Covariance estimator with shrinkage.
163
+
164
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
165
+
166
+ Parameters
167
+ ----------
168
+ store_precision : bool, default=True
169
+ Specify if the estimated precision is stored.
170
+
171
+ assume_centered : bool, default=False
172
+ If True, data will not be centered before computation.
173
+ Useful when working with data whose mean is almost, but not exactly
174
+ zero.
175
+ If False, data will be centered before computation.
176
+
177
+ shrinkage : float, default=0.1
178
+ Coefficient in the convex combination used for the computation
179
+ of the shrunk estimate. Range is [0, 1].
180
+
181
+ Attributes
182
+ ----------
183
+ covariance_ : ndarray of shape (n_features, n_features)
184
+ Estimated covariance matrix
185
+
186
+ location_ : ndarray of shape (n_features,)
187
+ Estimated location, i.e. the estimated mean.
188
+
189
+ precision_ : ndarray of shape (n_features, n_features)
190
+ Estimated pseudo inverse matrix.
191
+ (stored only if store_precision is True)
192
+
193
+ n_features_in_ : int
194
+ Number of features seen during :term:`fit`.
195
+
196
+ .. versionadded:: 0.24
197
+
198
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
199
+ Names of features seen during :term:`fit`. Defined only when `X`
200
+ has feature names that are all strings.
201
+
202
+ .. versionadded:: 1.0
203
+
204
+ See Also
205
+ --------
206
+ EllipticEnvelope : An object for detecting outliers in
207
+ a Gaussian distributed dataset.
208
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
209
+ GraphicalLasso : Sparse inverse covariance estimation
210
+ with an l1-penalized estimator.
211
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
212
+ choice of the l1 penalty.
213
+ LedoitWolf : LedoitWolf Estimator.
214
+ MinCovDet : Minimum Covariance Determinant
215
+ (robust estimator of covariance).
216
+ OAS : Oracle Approximating Shrinkage Estimator.
217
+
218
+ Notes
219
+ -----
220
+ The regularized covariance is given by:
221
+
222
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
223
+
224
+ where mu = trace(cov) / n_features
225
+
226
+ Examples
227
+ --------
228
+ >>> import numpy as np
229
+ >>> from sklearn.covariance import ShrunkCovariance
230
+ >>> from sklearn.datasets import make_gaussian_quantiles
231
+ >>> real_cov = np.array([[.8, .3],
232
+ ... [.3, .4]])
233
+ >>> rng = np.random.RandomState(0)
234
+ >>> X = rng.multivariate_normal(mean=[0, 0],
235
+ ... cov=real_cov,
236
+ ... size=500)
237
+ >>> cov = ShrunkCovariance().fit(X)
238
+ >>> cov.covariance_
239
+ array([[0.7387..., 0.2536...],
240
+ [0.2536..., 0.4110...]])
241
+ >>> cov.location_
242
+ array([0.0622..., 0.0193...])
243
+ """
244
+
245
+ _parameter_constraints: dict = {
246
+ **EmpiricalCovariance._parameter_constraints,
247
+ "shrinkage": [Interval(Real, 0, 1, closed="both")],
248
+ }
249
+
250
+ def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1):
251
+ super().__init__(
252
+ store_precision=store_precision, assume_centered=assume_centered
253
+ )
254
+ self.shrinkage = shrinkage
255
+
256
+ @_fit_context(prefer_skip_nested_validation=True)
257
+ def fit(self, X, y=None):
258
+ """Fit the shrunk covariance model to X.
259
+
260
+ Parameters
261
+ ----------
262
+ X : array-like of shape (n_samples, n_features)
263
+ Training data, where `n_samples` is the number of samples
264
+ and `n_features` is the number of features.
265
+
266
+ y : Ignored
267
+ Not used, present for API consistency by convention.
268
+
269
+ Returns
270
+ -------
271
+ self : object
272
+ Returns the instance itself.
273
+ """
274
+ X = self._validate_data(X)
275
+ # Not calling the parent object to fit, to avoid a potential
276
+ # matrix inversion when setting the precision
277
+ if self.assume_centered:
278
+ self.location_ = np.zeros(X.shape[1])
279
+ else:
280
+ self.location_ = X.mean(0)
281
+ covariance = empirical_covariance(X, assume_centered=self.assume_centered)
282
+ covariance = shrunk_covariance(covariance, self.shrinkage)
283
+ self._set_covariance(covariance)
284
+
285
+ return self
286
+
287
+
288
+ # Ledoit-Wolf estimator
289
+
290
+
291
+ @validate_params(
292
+ {
293
+ "X": ["array-like"],
294
+ "assume_centered": ["boolean"],
295
+ "block_size": [Interval(Integral, 1, None, closed="left")],
296
+ },
297
+ prefer_skip_nested_validation=True,
298
+ )
299
+ def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
300
+ """Estimate the shrunk Ledoit-Wolf covariance matrix.
301
+
302
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
303
+
304
+ Parameters
305
+ ----------
306
+ X : array-like of shape (n_samples, n_features)
307
+ Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
308
+
309
+ assume_centered : bool, default=False
310
+ If True, data will not be centered before computation.
311
+ Useful to work with data whose mean is approximately equal to
312
+ zero but is not exactly zero.
313
+ If False, data will be centered before computation.
314
+
315
+ block_size : int, default=1000
316
+ Size of blocks into which the covariance matrix will be split.
317
+
318
+ Returns
319
+ -------
320
+ shrinkage : float
321
+ Coefficient in the convex combination used for the computation
322
+ of the shrunk estimate.
323
+
324
+ Notes
325
+ -----
326
+ The regularized (shrunk) covariance is:
327
+
328
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
329
+
330
+ where mu = trace(cov) / n_features
331
+
332
+ Examples
333
+ --------
334
+ >>> import numpy as np
335
+ >>> from sklearn.covariance import ledoit_wolf_shrinkage
336
+ >>> real_cov = np.array([[.4, .2], [.2, .8]])
337
+ >>> rng = np.random.RandomState(0)
338
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50)
339
+ >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X)
340
+ >>> shrinkage_coefficient
341
+ 0.23...
342
+ """
343
+ X = check_array(X)
344
+ # for only one feature, the result is the same whatever the shrinkage
345
+ if len(X.shape) == 2 and X.shape[1] == 1:
346
+ return 0.0
347
+ if X.ndim == 1:
348
+ X = np.reshape(X, (1, -1))
349
+
350
+ if X.shape[0] == 1:
351
+ warnings.warn(
352
+ "Only one sample available. You may want to reshape your data array"
353
+ )
354
+ n_samples, n_features = X.shape
355
+
356
+ # optionally center data
357
+ if not assume_centered:
358
+ X = X - X.mean(0)
359
+
360
+ # A non-blocked version of the computation is present in the tests
361
+ # in tests/test_covariance.py
362
+
363
+ # number of blocks to split the covariance matrix into
364
+ n_splits = int(n_features / block_size)
365
+ X2 = X**2
366
+ emp_cov_trace = np.sum(X2, axis=0) / n_samples
367
+ mu = np.sum(emp_cov_trace) / n_features
368
+ beta_ = 0.0 # sum of the coefficients of <X2.T, X2>
369
+ delta_ = 0.0 # sum of the *squared* coefficients of <X.T, X>
370
+ # starting block computation
371
+ for i in range(n_splits):
372
+ for j in range(n_splits):
373
+ rows = slice(block_size * i, block_size * (i + 1))
374
+ cols = slice(block_size * j, block_size * (j + 1))
375
+ beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
376
+ delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
377
+ rows = slice(block_size * i, block_size * (i + 1))
378
+ beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :]))
379
+ delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2)
380
+ for j in range(n_splits):
381
+ cols = slice(block_size * j, block_size * (j + 1))
382
+ beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols]))
383
+ delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2)
384
+ delta_ += np.sum(
385
+ np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2
386
+ )
387
+ delta_ /= n_samples**2
388
+ beta_ += np.sum(
389
+ np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :])
390
+ )
391
+ # use delta_ to compute beta
392
+ beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_)
393
+ # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
394
+ delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2
395
+ delta /= n_features
396
+ # get final beta as the min between beta and delta
397
+ # We do this to prevent shrinking more than "1", which would invert
398
+ # the value of covariances
399
+ beta = min(beta, delta)
400
+ # finally get shrinkage
401
+ shrinkage = 0 if beta == 0 else beta / delta
402
+ return shrinkage
403
+
404
+
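The blocked accumulation in `ledoit_wolf_shrinkage` above is a memory-friendly way of computing two global sums; an unblocked restatement (an editorial sketch in the spirit of the non-blocked version the comment says lives in the tests, not the library's own code) may make the estimator easier to follow:

import numpy as np

def naive_ledoit_wolf_shrinkage(X):
    # Same quantities as the blocked loops above, computed in one shot.
    # This sketch always centers the data first.
    X = np.asarray(X, dtype=float)
    X = X - X.mean(0)
    n_samples, n_features = X.shape
    X2 = X**2
    emp_cov_trace = X2.sum(axis=0) / n_samples
    mu = emp_cov_trace.sum() / n_features
    beta_ = np.sum(X2.T @ X2)                       # sum of coefficients of <X2.T, X2>
    delta_ = np.sum((X.T @ X) ** 2) / n_samples**2  # sum of squared coefficients of <X.T, X>
    beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_)
    delta = (delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2) / n_features
    beta = min(beta, delta)  # cap so the shrinkage never exceeds 1
    return 0 if beta == 0 else beta / delta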
405
+ @validate_params(
406
+ {"X": ["array-like"]},
407
+ prefer_skip_nested_validation=False,
408
+ )
409
+ def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
410
+ """Estimate the shrunk Ledoit-Wolf covariance matrix.
411
+
412
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
413
+
414
+ Parameters
415
+ ----------
416
+ X : array-like of shape (n_samples, n_features)
417
+ Data from which to compute the covariance estimate.
418
+
419
+ assume_centered : bool, default=False
420
+ If True, data will not be centered before computation.
421
+ Useful to work with data whose mean is approximately equal to
422
+ zero but is not exactly zero.
423
+ If False, data will be centered before computation.
424
+
425
+ block_size : int, default=1000
426
+ Size of blocks into which the covariance matrix will be split.
427
+ This is purely a memory optimization and does not affect results.
428
+
429
+ Returns
430
+ -------
431
+ shrunk_cov : ndarray of shape (n_features, n_features)
432
+ Shrunk covariance.
433
+
434
+ shrinkage : float
435
+ Coefficient in the convex combination used for the computation
436
+ of the shrunk estimate.
437
+
438
+ Notes
439
+ -----
440
+ The regularized (shrunk) covariance is:
441
+
442
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
443
+
444
+ where mu = trace(cov) / n_features
445
+
446
+ Examples
447
+ --------
448
+ >>> import numpy as np
449
+ >>> from sklearn.covariance import empirical_covariance, ledoit_wolf
450
+ >>> real_cov = np.array([[.4, .2], [.2, .8]])
451
+ >>> rng = np.random.RandomState(0)
452
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50)
453
+ >>> covariance, shrinkage = ledoit_wolf(X)
454
+ >>> covariance
455
+ array([[0.44..., 0.16...],
456
+ [0.16..., 0.80...]])
457
+ >>> shrinkage
458
+ 0.23...
459
+ """
460
+ estimator = LedoitWolf(
461
+ assume_centered=assume_centered,
462
+ block_size=block_size,
463
+ store_precision=False,
464
+ ).fit(X)
465
+
466
+ return estimator.covariance_, estimator.shrinkage_
467
+
468
+
469
+ class LedoitWolf(EmpiricalCovariance):
470
+ """LedoitWolf Estimator.
471
+
472
+ Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
473
+ coefficient is computed using O. Ledoit and M. Wolf's formula as
474
+ described in "A Well-Conditioned Estimator for Large-Dimensional
475
+ Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
476
+ Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
477
+
478
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
479
+
480
+ Parameters
481
+ ----------
482
+ store_precision : bool, default=True
483
+ Specify if the estimated precision is stored.
484
+
485
+ assume_centered : bool, default=False
486
+ If True, data will not be centered before computation.
487
+ Useful when working with data whose mean is almost, but not exactly
488
+ zero.
489
+ If False (default), data will be centered before computation.
490
+
491
+ block_size : int, default=1000
492
+ Size of blocks into which the covariance matrix will be split
493
+ during its Ledoit-Wolf estimation. This is purely a memory
494
+ optimization and does not affect results.
495
+
496
+ Attributes
497
+ ----------
498
+ covariance_ : ndarray of shape (n_features, n_features)
499
+ Estimated covariance matrix.
500
+
501
+ location_ : ndarray of shape (n_features,)
502
+ Estimated location, i.e. the estimated mean.
503
+
504
+ precision_ : ndarray of shape (n_features, n_features)
505
+ Estimated pseudo inverse matrix.
506
+ (stored only if store_precision is True)
507
+
508
+ shrinkage_ : float
509
+ Coefficient in the convex combination used for the computation
510
+ of the shrunk estimate. Range is [0, 1].
511
+
512
+ n_features_in_ : int
513
+ Number of features seen during :term:`fit`.
514
+
515
+ .. versionadded:: 0.24
516
+
517
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
518
+ Names of features seen during :term:`fit`. Defined only when `X`
519
+ has feature names that are all strings.
520
+
521
+ .. versionadded:: 1.0
522
+
523
+ See Also
524
+ --------
525
+ EllipticEnvelope : An object for detecting outliers in
526
+ a Gaussian distributed dataset.
527
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
528
+ GraphicalLasso : Sparse inverse covariance estimation
529
+ with an l1-penalized estimator.
530
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
531
+ choice of the l1 penalty.
532
+ MinCovDet : Minimum Covariance Determinant
533
+ (robust estimator of covariance).
534
+ OAS : Oracle Approximating Shrinkage Estimator.
535
+ ShrunkCovariance : Covariance estimator with shrinkage.
536
+
537
+ Notes
538
+ -----
539
+ The regularised covariance is:
540
+
541
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
542
+
543
+ where mu = trace(cov) / n_features
544
+ and shrinkage is given by the Ledoit and Wolf formula (see References)
545
+
546
+ References
547
+ ----------
548
+ "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
549
+ Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
550
+ February 2004, pages 365-411.
551
+
552
+ Examples
553
+ --------
554
+ >>> import numpy as np
555
+ >>> from sklearn.covariance import LedoitWolf
556
+ >>> real_cov = np.array([[.4, .2],
557
+ ... [.2, .8]])
558
+ >>> np.random.seed(0)
559
+ >>> X = np.random.multivariate_normal(mean=[0, 0],
560
+ ... cov=real_cov,
561
+ ... size=50)
562
+ >>> cov = LedoitWolf().fit(X)
563
+ >>> cov.covariance_
564
+ array([[0.4406..., 0.1616...],
565
+ [0.1616..., 0.8022...]])
566
+ >>> cov.location_
567
+ array([ 0.0595... , -0.0075...])
568
+ """
569
+
570
+ _parameter_constraints: dict = {
571
+ **EmpiricalCovariance._parameter_constraints,
572
+ "block_size": [Interval(Integral, 1, None, closed="left")],
573
+ }
574
+
575
+ def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000):
576
+ super().__init__(
577
+ store_precision=store_precision, assume_centered=assume_centered
578
+ )
579
+ self.block_size = block_size
580
+
581
+ @_fit_context(prefer_skip_nested_validation=True)
582
+ def fit(self, X, y=None):
583
+ """Fit the Ledoit-Wolf shrunk covariance model to X.
584
+
585
+ Parameters
586
+ ----------
587
+ X : array-like of shape (n_samples, n_features)
588
+ Training data, where `n_samples` is the number of samples
589
+ and `n_features` is the number of features.
590
+ y : Ignored
591
+ Not used, present for API consistency by convention.
592
+
593
+ Returns
594
+ -------
595
+ self : object
596
+ Returns the instance itself.
597
+ """
598
+ # Not calling the parent object to fit, to avoid computing the
599
+ # covariance matrix (and potentially the precision)
600
+ X = self._validate_data(X)
601
+ if self.assume_centered:
602
+ self.location_ = np.zeros(X.shape[1])
603
+ else:
604
+ self.location_ = X.mean(0)
605
+ covariance, shrinkage = _ledoit_wolf(
606
+ X - self.location_, assume_centered=True, block_size=self.block_size
607
+ )
608
+ self.shrinkage_ = shrinkage
609
+ self._set_covariance(covariance)
610
+
611
+ return self
612
+
613
+
614
+ # OAS estimator
615
+ @validate_params(
616
+ {"X": ["array-like"]},
617
+ prefer_skip_nested_validation=False,
618
+ )
619
+ def oas(X, *, assume_centered=False):
620
+ """Estimate covariance with the Oracle Approximating Shrinkage as proposed in [1]_.
621
+
622
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
623
+
624
+ Parameters
625
+ ----------
626
+ X : array-like of shape (n_samples, n_features)
627
+ Data from which to compute the covariance estimate.
628
+
629
+ assume_centered : bool, default=False
630
+ If True, data will not be centered before computation.
631
+ Useful when working with data whose mean is almost, but not
632
+ exactly, zero.
633
+ If False, data will be centered before computation.
634
+
635
+ Returns
636
+ -------
637
+ shrunk_cov : array-like of shape (n_features, n_features)
638
+ Shrunk covariance.
639
+
640
+ shrinkage : float
641
+ Coefficient in the convex combination used for the computation
642
+ of the shrunk estimate.
643
+
644
+ Notes
645
+ -----
646
+ The regularised covariance is:
647
+
648
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),
649
+
650
+ where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
651
+ (see [1]_).
652
+
653
+ The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
654
+ the original article, formula (23) states that 2/p (p being the number of
655
+ features) is multiplied by Trace(cov*cov) in both the numerator and
656
+ denominator, but this operation is omitted because for a large p, the value
657
+ of 2/p is so small that it doesn't affect the value of the estimator.
658
+
659
+ References
660
+ ----------
661
+ .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
662
+ Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
663
+ IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
664
+ <0907.4698>`
665
+
666
+ Examples
667
+ --------
668
+ >>> import numpy as np
669
+ >>> from sklearn.covariance import oas
670
+ >>> rng = np.random.RandomState(0)
671
+ >>> real_cov = [[.8, .3], [.3, .4]]
672
+ >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
673
+ >>> shrunk_cov, shrinkage = oas(X)
674
+ >>> shrunk_cov
675
+ array([[0.7533..., 0.2763...],
676
+ [0.2763..., 0.3964...]])
677
+ >>> shrinkage
678
+ 0.0195...
679
+ """
680
+ estimator = OAS(
681
+ assume_centered=assume_centered,
682
+ ).fit(X)
683
+ return estimator.covariance_, estimator.shrinkage_
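As an illustrative sketch on arbitrary toy data, the OAS estimate uses the same convex combination as Ledoit-Wolf; only the shrinkage coefficient differs, so it can be instructive to compare the two on the same sample:

import numpy as np
from sklearn.covariance import ledoit_wolf, oas

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)

oas_cov, oas_shrinkage = oas(X)
lw_cov, lw_shrinkage = ledoit_wolf(X)

# The two estimators usually select different shrinkage strengths on the
# same data; they optimize different criteria, so neither value is "the"
# correct one.
print(f"OAS shrinkage: {oas_shrinkage:.4f}  Ledoit-Wolf shrinkage: {lw_shrinkage:.4f}")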
684
+
685
+
686
+ class OAS(EmpiricalCovariance):
687
+ """Oracle Approximating Shrinkage Estimator as proposed in [1]_.
688
+
689
+ Read more in the :ref:`User Guide <shrunk_covariance>`.
690
+
691
+ Parameters
692
+ ----------
693
+ store_precision : bool, default=True
694
+ Specify if the estimated precision is stored.
695
+
696
+ assume_centered : bool, default=False
697
+ If True, data will not be centered before computation.
698
+ Useful when working with data whose mean is almost, but not exactly
699
+ zero.
700
+ If False (default), data will be centered before computation.
701
+
702
+ Attributes
703
+ ----------
704
+ covariance_ : ndarray of shape (n_features, n_features)
705
+ Estimated covariance matrix.
706
+
707
+ location_ : ndarray of shape (n_features,)
708
+ Estimated location, i.e. the estimated mean.
709
+
710
+ precision_ : ndarray of shape (n_features, n_features)
711
+ Estimated pseudo inverse matrix.
712
+ (stored only if store_precision is True)
713
+
714
+ shrinkage_ : float
715
+ Coefficient in the convex combination used for the computation
716
+ of the shrunk estimate. Range is [0, 1].
717
+
718
+ n_features_in_ : int
719
+ Number of features seen during :term:`fit`.
720
+
721
+ .. versionadded:: 0.24
722
+
723
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
724
+ Names of features seen during :term:`fit`. Defined only when `X`
725
+ has feature names that are all strings.
726
+
727
+ .. versionadded:: 1.0
728
+
729
+ See Also
730
+ --------
731
+ EllipticEnvelope : An object for detecting outliers in
732
+ a Gaussian distributed dataset.
733
+ EmpiricalCovariance : Maximum likelihood covariance estimator.
734
+ GraphicalLasso : Sparse inverse covariance estimation
735
+ with an l1-penalized estimator.
736
+ GraphicalLassoCV : Sparse inverse covariance with cross-validated
737
+ choice of the l1 penalty.
738
+ LedoitWolf : LedoitWolf Estimator.
739
+ MinCovDet : Minimum Covariance Determinant
740
+ (robust estimator of covariance).
741
+ ShrunkCovariance : Covariance estimator with shrinkage.
742
+
743
+ Notes
744
+ -----
745
+ The regularised covariance is:
746
+
747
+ (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),
748
+
749
+ where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
750
+ (see [1]_).
751
+
752
+ The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
753
+ the original article, formula (23) states that 2/p (p being the number of
754
+ features) is multiplied by Trace(cov*cov) in both the numerator and
755
+ denominator, but this operation is omitted because for a large p, the value
756
+ of 2/p is so small that it doesn't affect the value of the estimator.
757
+
758
+ References
759
+ ----------
760
+ .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
761
+ Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
762
+ IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
763
+ <0907.4698>`
764
+
765
+ Examples
766
+ --------
767
+ >>> import numpy as np
768
+ >>> from sklearn.covariance import OAS
769
+ >>> from sklearn.datasets import make_gaussian_quantiles
770
+ >>> real_cov = np.array([[.8, .3],
771
+ ... [.3, .4]])
772
+ >>> rng = np.random.RandomState(0)
773
+ >>> X = rng.multivariate_normal(mean=[0, 0],
774
+ ... cov=real_cov,
775
+ ... size=500)
776
+ >>> oas = OAS().fit(X)
777
+ >>> oas.covariance_
778
+ array([[0.7533..., 0.2763...],
779
+ [0.2763..., 0.3964...]])
780
+ >>> oas.precision_
781
+ array([[ 1.7833..., -1.2431... ],
782
+ [-1.2431..., 3.3889...]])
783
+ >>> oas.shrinkage_
784
+ 0.0195...
785
+ """
786
+
787
+ @_fit_context(prefer_skip_nested_validation=True)
788
+ def fit(self, X, y=None):
789
+ """Fit the Oracle Approximating Shrinkage covariance model to X.
790
+
791
+ Parameters
792
+ ----------
793
+ X : array-like of shape (n_samples, n_features)
794
+ Training data, where `n_samples` is the number of samples
795
+ and `n_features` is the number of features.
796
+ y : Ignored
797
+ Not used, present for API consistency by convention.
798
+
799
+ Returns
800
+ -------
801
+ self : object
802
+ Returns the instance itself.
803
+ """
804
+ X = self._validate_data(X)
805
+ # Not calling the parent object to fit, to avoid computing the
806
+ # covariance matrix (and potentially the precision)
807
+ if self.assume_centered:
808
+ self.location_ = np.zeros(X.shape[1])
809
+ else:
810
+ self.location_ = X.mean(0)
811
+
812
+ covariance, shrinkage = _oas(X - self.location_, assume_centered=True)
813
+ self.shrinkage_ = shrinkage
814
+ self._set_covariance(covariance)
815
+
816
+ return self
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ """
2
+ The :mod:`sklearn.experimental` module provides importable modules that enable
3
+ the use of experimental features or estimators.
4
+
5
+ The features and estimators that are experimental aren't subject to
6
+ deprecation cycles. Use them at your own risks!
7
+ """
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (442 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_halving_search_cv.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_hist_gradient_boosting.cpython-310.pyc ADDED
Binary file (830 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/__pycache__/enable_iterative_imputer.cpython-310.pyc ADDED
Binary file (824 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_halving_search_cv.py ADDED
@@ -0,0 +1,32 @@
1
+ """Enables Successive Halving search-estimators
2
+
3
+ The API and results of these estimators might change without any deprecation
4
+ cycle.
5
+
6
+ Importing this file dynamically sets the
7
+ :class:`~sklearn.model_selection.HalvingRandomSearchCV` and
8
+ :class:`~sklearn.model_selection.HalvingGridSearchCV` as attributes of the
9
+ `model_selection` module::
10
+
11
+ >>> # explicitly require this experimental feature
12
+ >>> from sklearn.experimental import enable_halving_search_cv # noqa
13
+ >>> # now you can import normally from model_selection
14
+ >>> from sklearn.model_selection import HalvingRandomSearchCV
15
+ >>> from sklearn.model_selection import HalvingGridSearchCV
16
+
17
+
18
+ The ``# noqa`` comment can be removed: it just tells linters like
19
+ flake8 to ignore the import, which appears as unused.
20
+ """
21
+
22
+ from .. import model_selection
23
+ from ..model_selection._search_successive_halving import (
24
+ HalvingGridSearchCV,
25
+ HalvingRandomSearchCV,
26
+ )
27
+
28
+ # use setattr to avoid mypy errors when monkeypatching
29
+ setattr(model_selection, "HalvingRandomSearchCV", HalvingRandomSearchCV)
30
+ setattr(model_selection, "HalvingGridSearchCV", HalvingGridSearchCV)
31
+
32
+ model_selection.__all__ += ["HalvingRandomSearchCV", "HalvingGridSearchCV"]
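A hedged usage sketch of the estimators enabled above; the dataset and parameter grid are arbitrary illustrations, not recommendations:

from sklearn.experimental import enable_halving_search_cv  # noqa
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=400, random_state=0)
param_grid = {"max_depth": [3, 5, None], "min_samples_split": [2, 5, 10]}

# Successive halving starts every candidate on a small resource budget and
# only promotes the best-performing ones to larger budgets.
search = HalvingGridSearchCV(
    RandomForestClassifier(random_state=0), param_grid, factor=3, random_state=0
).fit(X, y)
print(search.best_params_)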
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_hist_gradient_boosting.py ADDED
@@ -0,0 +1,20 @@
1
+ """This is now a no-op and can be safely removed from your code.
2
+
3
+ It used to enable the use of
4
+ :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
5
+ :class:`~sklearn.ensemble.HistGradientBoostingRegressor` when they were still
6
+ :term:`experimental`, but these estimators are now stable and can be imported
7
+ normally from `sklearn.ensemble`.
8
+ """
9
+ # Don't remove this file, we don't want to break users code just because the
10
+ # feature isn't experimental anymore.
11
+
12
+
13
+ import warnings
14
+
15
+ warnings.warn(
16
+ "Since version 1.0, "
17
+ "it is not needed to import enable_hist_gradient_boosting anymore. "
18
+ "HistGradientBoostingClassifier and HistGradientBoostingRegressor are now "
19
+ "stable and can be normally imported from sklearn.ensemble."
20
+ )
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/enable_iterative_imputer.py ADDED
@@ -0,0 +1,20 @@
1
+ """Enables IterativeImputer
2
+
3
+ The API and results of this estimator might change without any deprecation
4
+ cycle.
5
+
6
+ Importing this file dynamically sets :class:`~sklearn.impute.IterativeImputer`
7
+ as an attribute of the impute module::
8
+
9
+ >>> # explicitly require this experimental feature
10
+ >>> from sklearn.experimental import enable_iterative_imputer # noqa
11
+ >>> # now you can import normally from impute
12
+ >>> from sklearn.impute import IterativeImputer
13
+ """
14
+
15
+ from .. import impute
16
+ from ..impute._iterative import IterativeImputer
17
+
18
+ # use setattr to avoid mypy errors when monkeypatching
19
+ setattr(impute, "IterativeImputer", IterativeImputer)
20
+ impute.__all__ += ["IterativeImputer"]
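A hedged usage sketch of the imputer enabled above, on made-up data with missing entries:

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0], [3.0, 6.0], [4.0, 8.0], [np.nan, 3.0], [7.0, np.nan]])

# Each feature with missing values is modelled from the other features and
# the estimates are refined over several rounds.
imputer = IterativeImputer(max_iter=10, random_state=0)
print(imputer.fit_transform(X))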
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_hist_gradient_boosting.cpython-310.pyc ADDED
Binary file (957 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_iterative_imputer.cpython-310.pyc ADDED
Binary file (1.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/__pycache__/test_enable_successive_halving.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py ADDED
@@ -0,0 +1,19 @@
1
+ """Tests for making sure experimental imports work as expected."""
2
+
3
+ import textwrap
4
+
5
+ import pytest
6
+
7
+ from sklearn.utils import _IS_WASM
8
+ from sklearn.utils._testing import assert_run_python_script_without_output
9
+
10
+
11
+ @pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
12
+ def test_import_raises_warning():
13
+ code = """
14
+ import pytest
15
+ with pytest.warns(UserWarning, match="it is not needed to import"):
16
+ from sklearn.experimental import enable_hist_gradient_boosting # noqa
17
+ """
18
+ pattern = "it is not needed to import enable_hist_gradient_boosting anymore"
19
+ assert_run_python_script_without_output(textwrap.dedent(code), pattern=pattern)
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_iterative_imputer.py ADDED
@@ -0,0 +1,51 @@
1
+ """Tests for making sure experimental imports work as expected."""
2
+
3
+ import textwrap
4
+
5
+ import pytest
6
+
7
+ from sklearn.utils import _IS_WASM
8
+ from sklearn.utils._testing import assert_run_python_script_without_output
9
+
10
+
11
+ @pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
12
+ def test_imports_strategies():
13
+ # Make sure different import strategies work or fail as expected.
14
+
15
+ # Since Python caches the imported modules, we need to run a child process
16
+ # for every test case. Else, the tests would not be independent
17
+ # (manually removing the imports from the cache (sys.modules) is not
18
+ # recommended and can lead to many complications).
19
+ pattern = "IterativeImputer is experimental"
20
+ good_import = """
21
+ from sklearn.experimental import enable_iterative_imputer
22
+ from sklearn.impute import IterativeImputer
23
+ """
24
+ assert_run_python_script_without_output(
25
+ textwrap.dedent(good_import), pattern=pattern
26
+ )
27
+
28
+ good_import_with_ensemble_first = """
29
+ import sklearn.ensemble
30
+ from sklearn.experimental import enable_iterative_imputer
31
+ from sklearn.impute import IterativeImputer
32
+ """
33
+ assert_run_python_script_without_output(
34
+ textwrap.dedent(good_import_with_ensemble_first),
35
+ pattern=pattern,
36
+ )
37
+
38
+ bad_imports = f"""
39
+ import pytest
40
+
41
+ with pytest.raises(ImportError, match={pattern!r}):
42
+ from sklearn.impute import IterativeImputer
43
+
44
+ import sklearn.experimental
45
+ with pytest.raises(ImportError, match={pattern!r}):
46
+ from sklearn.impute import IterativeImputer
47
+ """
48
+ assert_run_python_script_without_output(
49
+ textwrap.dedent(bad_imports),
50
+ pattern=pattern,
51
+ )
env-llmeval/lib/python3.10/site-packages/sklearn/experimental/tests/test_enable_successive_halving.py ADDED
@@ -0,0 +1,53 @@
1
+ """Tests for making sure experimental imports work as expected."""
2
+
3
+ import textwrap
4
+
5
+ import pytest
6
+
7
+ from sklearn.utils import _IS_WASM
8
+ from sklearn.utils._testing import assert_run_python_script_without_output
9
+
10
+
11
+ @pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
12
+ def test_imports_strategies():
13
+ # Make sure different import strategies work or fail as expected.
14
+
15
+ # Since Python caches the imported modules, we need to run a child process
16
+ # for every test case. Else, the tests would not be independent
17
+ # (manually removing the imports from the cache (sys.modules) is not
18
+ # recommended and can lead to many complications).
19
+ pattern = "Halving(Grid|Random)SearchCV is experimental"
20
+ good_import = """
21
+ from sklearn.experimental import enable_halving_search_cv
22
+ from sklearn.model_selection import HalvingGridSearchCV
23
+ from sklearn.model_selection import HalvingRandomSearchCV
24
+ """
25
+ assert_run_python_script_without_output(
26
+ textwrap.dedent(good_import), pattern=pattern
27
+ )
28
+
29
+ good_import_with_model_selection_first = """
30
+ import sklearn.model_selection
31
+ from sklearn.experimental import enable_halving_search_cv
32
+ from sklearn.model_selection import HalvingGridSearchCV
33
+ from sklearn.model_selection import HalvingRandomSearchCV
34
+ """
35
+ assert_run_python_script_without_output(
36
+ textwrap.dedent(good_import_with_model_selection_first),
37
+ pattern=pattern,
38
+ )
39
+
40
+ bad_imports = f"""
41
+ import pytest
42
+
43
+ with pytest.raises(ImportError, match={pattern!r}):
44
+ from sklearn.model_selection import HalvingGridSearchCV
45
+
46
+ import sklearn.experimental
47
+ with pytest.raises(ImportError, match={pattern!r}):
48
+ from sklearn.model_selection import HalvingRandomSearchCV
49
+ """
50
+ assert_run_python_script_without_output(
51
+ textwrap.dedent(bad_imports),
52
+ pattern=pattern,
53
+ )
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__init__.py ADDED
@@ -0,0 +1,42 @@
1
+ """
2
+ The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
3
+ algorithm.
4
+ """
5
+
6
+ from ._ball_tree import BallTree
7
+ from ._base import VALID_METRICS, VALID_METRICS_SPARSE, sort_graph_by_row_values
8
+ from ._classification import KNeighborsClassifier, RadiusNeighborsClassifier
9
+ from ._graph import (
10
+ KNeighborsTransformer,
11
+ RadiusNeighborsTransformer,
12
+ kneighbors_graph,
13
+ radius_neighbors_graph,
14
+ )
15
+ from ._kd_tree import KDTree
16
+ from ._kde import KernelDensity
17
+ from ._lof import LocalOutlierFactor
18
+ from ._nca import NeighborhoodComponentsAnalysis
19
+ from ._nearest_centroid import NearestCentroid
20
+ from ._regression import KNeighborsRegressor, RadiusNeighborsRegressor
21
+ from ._unsupervised import NearestNeighbors
22
+
23
+ __all__ = [
24
+ "BallTree",
25
+ "KDTree",
26
+ "KNeighborsClassifier",
27
+ "KNeighborsRegressor",
28
+ "KNeighborsTransformer",
29
+ "NearestCentroid",
30
+ "NearestNeighbors",
31
+ "RadiusNeighborsClassifier",
32
+ "RadiusNeighborsRegressor",
33
+ "RadiusNeighborsTransformer",
34
+ "kneighbors_graph",
35
+ "radius_neighbors_graph",
36
+ "KernelDensity",
37
+ "LocalOutlierFactor",
38
+ "NeighborhoodComponentsAnalysis",
39
+ "sort_graph_by_row_values",
40
+ "VALID_METRICS",
41
+ "VALID_METRICS_SPARSE",
42
+ ]
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/__pycache__/_graph.cpython-310.pyc ADDED
Binary file (22.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_ball_tree.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (774 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_base.py ADDED
@@ -0,0 +1,1387 @@
1
+ """Base and mixin classes for nearest neighbors."""
2
+ # Authors: Jake Vanderplas <[email protected]>
3
+ # Fabian Pedregosa <[email protected]>
4
+ # Alexandre Gramfort <[email protected]>
5
+ # Sparseness support by Lars Buitinck
6
+ # Multi-output support by Arnaud Joly <[email protected]>
7
+ #
8
+ # License: BSD 3 clause (C) INRIA, University of Amsterdam
9
+ import itertools
10
+ import numbers
11
+ import warnings
12
+ from abc import ABCMeta, abstractmethod
13
+ from functools import partial
14
+ from numbers import Integral, Real
15
+
16
+ import numpy as np
17
+ from joblib import effective_n_jobs
18
+ from scipy.sparse import csr_matrix, issparse
19
+
20
+ from ..base import BaseEstimator, MultiOutputMixin, is_classifier
21
+ from ..exceptions import DataConversionWarning, EfficiencyWarning
22
+ from ..metrics import DistanceMetric, pairwise_distances_chunked
23
+ from ..metrics._pairwise_distances_reduction import (
24
+ ArgKmin,
25
+ RadiusNeighbors,
26
+ )
27
+ from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
28
+ from ..utils import (
29
+ _to_object_array,
30
+ check_array,
31
+ gen_even_slices,
32
+ )
33
+ from ..utils._param_validation import Interval, StrOptions, validate_params
34
+ from ..utils.fixes import parse_version, sp_base_version
35
+ from ..utils.multiclass import check_classification_targets
36
+ from ..utils.parallel import Parallel, delayed
37
+ from ..utils.validation import check_is_fitted, check_non_negative
38
+ from ._ball_tree import BallTree
39
+ from ._kd_tree import KDTree
40
+
41
+ SCIPY_METRICS = [
42
+ "braycurtis",
43
+ "canberra",
44
+ "chebyshev",
45
+ "correlation",
46
+ "cosine",
47
+ "dice",
48
+ "hamming",
49
+ "jaccard",
50
+ "mahalanobis",
51
+ "minkowski",
52
+ "rogerstanimoto",
53
+ "russellrao",
54
+ "seuclidean",
55
+ "sokalmichener",
56
+ "sokalsneath",
57
+ "sqeuclidean",
58
+ "yule",
59
+ ]
60
+ if sp_base_version < parse_version("1.11"):
61
+ # Deprecated in SciPy 1.9 and removed in SciPy 1.11
62
+ SCIPY_METRICS += ["kulsinski"]
63
+ if sp_base_version < parse_version("1.9"):
64
+ # Deprecated in SciPy 1.0 and removed in SciPy 1.9
65
+ SCIPY_METRICS += ["matching"]
66
+
67
+ VALID_METRICS = dict(
68
+ ball_tree=BallTree.valid_metrics,
69
+ kd_tree=KDTree.valid_metrics,
70
+ # The following list comes from the
71
+ # sklearn.metrics.pairwise doc string
72
+ brute=sorted(set(PAIRWISE_DISTANCE_FUNCTIONS).union(SCIPY_METRICS)),
73
+ )
74
+
75
+ VALID_METRICS_SPARSE = dict(
76
+ ball_tree=[],
77
+ kd_tree=[],
78
+ brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {"haversine", "nan_euclidean"}),
79
+ )
80
+
81
+
82
+ def _get_weights(dist, weights):
83
+ """Get the weights from an array of distances and a parameter ``weights``.
84
+
85
+ Assume weights have already been validated.
86
+
87
+ Parameters
88
+ ----------
89
+ dist : ndarray
90
+ The input distances.
91
+
92
+ weights : {'uniform', 'distance'}, callable or None
93
+ The kind of weighting used.
94
+
95
+ Returns
96
+ -------
97
+ weights_arr : array of the same shape as ``dist``
98
+ If ``weights == 'uniform'``, then returns None.
99
+ """
100
+ if weights in (None, "uniform"):
101
+ return None
102
+
103
+ if weights == "distance":
104
+ # if user attempts to classify a point that was zero distance from one
105
+ # or more training points, those training points are weighted as 1.0
106
+ # and the other points as 0.0
107
+ if dist.dtype is np.dtype(object):
108
+ for point_dist_i, point_dist in enumerate(dist):
109
+ # check if point_dist is iterable
110
+ # (ex: RadiusNeighborsClassifier.predict may set an element of
111
+ # dist to 1e-6 to represent an 'outlier')
112
+ if hasattr(point_dist, "__contains__") and 0.0 in point_dist:
113
+ dist[point_dist_i] = point_dist == 0.0
114
+ else:
115
+ dist[point_dist_i] = 1.0 / point_dist
116
+ else:
117
+ with np.errstate(divide="ignore"):
118
+ dist = 1.0 / dist
119
+ inf_mask = np.isinf(dist)
120
+ inf_row = np.any(inf_mask, axis=1)
121
+ dist[inf_row] = inf_mask[inf_row]
122
+ return dist
123
+
124
+ if callable(weights):
125
+ return weights(dist)
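A standalone sketch of the "distance" weighting rule implemented above, written against a plain float array so the zero-distance special case is easy to see; `distance_weights` is a name introduced only for the illustration:

import numpy as np

def distance_weights(dist):
    # 1/d weighting; any row containing an exact zero distance gets weight
    # 1.0 on its zero-distance neighbors and 0.0 everywhere else.
    with np.errstate(divide="ignore"):
        w = 1.0 / dist
    inf_mask = np.isinf(w)
    inf_row = np.any(inf_mask, axis=1)
    w[inf_row] = inf_mask[inf_row]
    return w

dist = np.array([[0.0, 1.0, 2.0],   # first query coincides with a training point
                 [0.5, 1.0, 4.0]])
print(distance_weights(dist))
# [[1.   0.   0.  ]
#  [2.   1.   0.25]]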
126
+
127
+
128
+ def _is_sorted_by_data(graph):
129
+ """Return whether the graph's non-zero entries are sorted by data.
130
+
131
+ The non-zero entries are stored in graph.data and graph.indices.
132
+ For each row (or sample), the non-zero entries can be either:
133
+ - sorted by indices, as after graph.sort_indices();
134
+ - sorted by data, as after _check_precomputed(graph);
135
+ - not sorted.
136
+
137
+ Parameters
138
+ ----------
139
+ graph : sparse matrix of shape (n_samples, n_samples)
140
+ Neighbors graph as given by `kneighbors_graph` or
141
+ `radius_neighbors_graph`. Matrix should be in CSR format.
142
+
143
+ Returns
144
+ -------
145
+ res : bool
146
+ Whether input graph is sorted by data.
147
+ """
148
+ assert graph.format == "csr"
149
+ out_of_order = graph.data[:-1] > graph.data[1:]
150
+ line_change = np.unique(graph.indptr[1:-1] - 1)
151
+ line_change = line_change[line_change < out_of_order.shape[0]]
152
+ return out_of_order.sum() == out_of_order[line_change].sum()
153
+
154
+
155
+ def _check_precomputed(X):
156
+ """Check precomputed distance matrix.
157
+
158
+ If the precomputed distance matrix is sparse, it checks that the non-zero
159
+ entries are sorted by distances. If not, the matrix is copied and sorted.
160
+
161
+ Parameters
162
+ ----------
163
+ X : {sparse matrix, array-like}, (n_samples, n_samples)
164
+ Distance matrix to other samples. X may be a sparse matrix, in which
165
+ case only non-zero elements may be considered neighbors.
166
+
167
+ Returns
168
+ -------
169
+ X : {sparse matrix, array-like}, (n_samples, n_samples)
170
+ Distance matrix to other samples. X may be a sparse matrix, in which
171
+ case only non-zero elements may be considered neighbors.
172
+ """
173
+ if not issparse(X):
174
+ X = check_array(X)
175
+ check_non_negative(X, whom="precomputed distance matrix.")
176
+ return X
177
+ else:
178
+ graph = X
179
+
180
+ if graph.format not in ("csr", "csc", "coo", "lil"):
181
+ raise TypeError(
182
+ "Sparse matrix in {!r} format is not supported due to "
183
+ "its handling of explicit zeros".format(graph.format)
184
+ )
185
+ copied = graph.format != "csr"
186
+ graph = check_array(graph, accept_sparse="csr")
187
+ check_non_negative(graph, whom="precomputed distance matrix.")
188
+ graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True)
189
+
190
+ return graph
191
+
192
+
193
+ @validate_params(
194
+ {
195
+ "graph": ["sparse matrix"],
196
+ "copy": ["boolean"],
197
+ "warn_when_not_sorted": ["boolean"],
198
+ },
199
+ prefer_skip_nested_validation=True,
200
+ )
201
+ def sort_graph_by_row_values(graph, copy=False, warn_when_not_sorted=True):
202
+ """Sort a sparse graph such that each row is stored with increasing values.
203
+
204
+ .. versionadded:: 1.2
205
+
206
+ Parameters
207
+ ----------
208
+ graph : sparse matrix of shape (n_samples, n_samples)
209
+ Distance matrix to other samples, where only non-zero elements are
210
+ considered neighbors. Matrix is converted to CSR format if not already.
211
+
212
+ copy : bool, default=False
213
+ If True, the graph is copied before sorting. If False, the sorting is
214
+ performed inplace. If the graph is not of CSR format, `copy` must be
215
+ True to allow the conversion to CSR format, otherwise an error is
216
+ raised.
217
+
218
+ warn_when_not_sorted : bool, default=True
219
+ If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised
220
+ when the input graph is not sorted by row values.
221
+
222
+ Returns
223
+ -------
224
+ graph : sparse matrix of shape (n_samples, n_samples)
225
+ Distance matrix to other samples, where only non-zero elements are
226
+ considered neighbors. Matrix is in CSR format.
227
+
228
+ Examples
229
+ --------
230
+ >>> from scipy.sparse import csr_matrix
231
+ >>> from sklearn.neighbors import sort_graph_by_row_values
232
+ >>> X = csr_matrix(
233
+ ... [[0., 3., 1.],
234
+ ... [3., 0., 2.],
235
+ ... [1., 2., 0.]])
236
+ >>> X.data
237
+ array([3., 1., 3., 2., 1., 2.])
238
+ >>> X_ = sort_graph_by_row_values(X)
239
+ >>> X_.data
240
+ array([1., 3., 2., 3., 1., 2.])
241
+ """
242
+ if graph.format == "csr" and _is_sorted_by_data(graph):
243
+ return graph
244
+
245
+ if warn_when_not_sorted:
246
+ warnings.warn(
247
+ (
248
+ "Precomputed sparse input was not sorted by row values. Use the"
249
+ " function sklearn.neighbors.sort_graph_by_row_values to sort the input"
250
+ " by row values, with warn_when_not_sorted=False to remove this"
251
+ " warning."
252
+ ),
253
+ EfficiencyWarning,
254
+ )
255
+
256
+ if graph.format not in ("csr", "csc", "coo", "lil"):
257
+ raise TypeError(
258
+ f"Sparse matrix in {graph.format!r} format is not supported due to "
259
+ "its handling of explicit zeros"
260
+ )
261
+ elif graph.format != "csr":
262
+ if not copy:
263
+ raise ValueError(
264
+ "The input graph is not in CSR format. Use copy=True to allow "
265
+ "the conversion to CSR format."
266
+ )
267
+ graph = graph.asformat("csr")
268
+ elif copy: # csr format with copy=True
269
+ graph = graph.copy()
270
+
271
+ row_nnz = np.diff(graph.indptr)
272
+ if row_nnz.max() == row_nnz.min():
273
+ # if each sample has the same number of provided neighbors
274
+ n_samples = graph.shape[0]
275
+ distances = graph.data.reshape(n_samples, -1)
276
+
277
+ order = np.argsort(distances, kind="mergesort")
278
+ order += np.arange(n_samples)[:, None] * row_nnz[0]
279
+ order = order.ravel()
280
+ graph.data = graph.data[order]
281
+ graph.indices = graph.indices[order]
282
+
283
+ else:
284
+ for start, stop in zip(graph.indptr, graph.indptr[1:]):
285
+ order = np.argsort(graph.data[start:stop], kind="mergesort")
286
+ graph.data[start:stop] = graph.data[start:stop][order]
287
+ graph.indices[start:stop] = graph.indices[start:stop][order]
288
+
289
+ return graph
290
+
291
+
292
+ def _kneighbors_from_graph(graph, n_neighbors, return_distance):
293
+ """Decompose a nearest neighbors sparse graph into distances and indices.
294
+
295
+ Parameters
296
+ ----------
297
+ graph : sparse matrix of shape (n_samples, n_samples)
298
+ Neighbors graph as given by `kneighbors_graph` or
299
+ `radius_neighbors_graph`. Matrix should be in CSR format.
300
+
301
+ n_neighbors : int
302
+ Number of neighbors required for each sample.
303
+
304
+ return_distance : bool
305
+ Whether or not to return the distances.
306
+
307
+ Returns
308
+ -------
309
+ neigh_dist : ndarray of shape (n_samples, n_neighbors)
310
+ Distances to nearest neighbors. Only present if `return_distance=True`.
311
+
312
+ neigh_ind : ndarray of shape (n_samples, n_neighbors)
313
+ Indices of nearest neighbors.
314
+ """
315
+ n_samples = graph.shape[0]
316
+ assert graph.format == "csr"
317
+
318
+ # number of neighbors by samples
319
+ row_nnz = np.diff(graph.indptr)
320
+ row_nnz_min = row_nnz.min()
321
+ if n_neighbors is not None and row_nnz_min < n_neighbors:
322
+ raise ValueError(
323
+ "%d neighbors per samples are required, but some samples have only"
324
+ " %d neighbors in precomputed graph matrix. Decrease number of "
325
+ "neighbors used or recompute the graph with more neighbors."
326
+ % (n_neighbors, row_nnz_min)
327
+ )
328
+
329
+ def extract(a):
330
+ # if each sample has the same number of provided neighbors
331
+ if row_nnz.max() == row_nnz_min:
332
+ return a.reshape(n_samples, -1)[:, :n_neighbors]
333
+ else:
334
+ idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
335
+ idx += graph.indptr[:-1, None]
336
+ return a.take(idx, mode="clip").reshape(n_samples, n_neighbors)
337
+
338
+ if return_distance:
339
+ return extract(graph.data), extract(graph.indices)
340
+ else:
341
+ return extract(graph.indices)
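A hedged sketch of the workflow this helper supports: a sparse distance graph built by `kneighbors_graph` can be handed back to a neighbors estimator with `metric="precomputed"`, as long as each row stores at least as many neighbors as are requested; the sizes below are arbitrary:

import numpy as np
from sklearn.neighbors import NearestNeighbors, kneighbors_graph, sort_graph_by_row_values

rng = np.random.RandomState(0)
X = rng.rand(20, 3)

# Store 5 neighbors per sample, then sort each row by distance so the
# estimator does not have to re-sort (and warn about) the graph.
graph = kneighbors_graph(X, n_neighbors=5, mode="distance")
graph = sort_graph_by_row_values(graph, copy=True, warn_when_not_sorted=False)

nn = NearestNeighbors(n_neighbors=3, metric="precomputed").fit(graph)
dist, ind = nn.kneighbors(graph, n_neighbors=3)
print(dist.shape, ind.shape)  # (20, 3) (20, 3)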
342
+
343
+
344
+ def _radius_neighbors_from_graph(graph, radius, return_distance):
345
+ """Decompose a nearest neighbors sparse graph into distances and indices.
346
+
347
+ Parameters
348
+ ----------
349
+ graph : sparse matrix of shape (n_samples, n_samples)
350
+ Neighbors graph as given by `kneighbors_graph` or
351
+ `radius_neighbors_graph`. Matrix should be in CSR format.
352
+
353
+ radius : float
354
+ Radius of neighborhoods which should be strictly positive.
355
+
356
+ return_distance : bool
357
+ Whether or not to return the distances.
358
+
359
+ Returns
360
+ -------
361
+ neigh_dist : ndarray of shape (n_samples,) of arrays
362
+ Distances to nearest neighbors. Only present if `return_distance=True`.
363
+
364
+ neigh_ind : ndarray of shape (n_samples,) of arrays
365
+ Indices of nearest neighbors.
366
+ """
367
+ assert graph.format == "csr"
368
+
369
+ no_filter_needed = bool(graph.data.max() <= radius)
370
+
371
+ if no_filter_needed:
372
+ data, indices, indptr = graph.data, graph.indices, graph.indptr
373
+ else:
374
+ mask = graph.data <= radius
375
+ if return_distance:
376
+ data = np.compress(mask, graph.data)
377
+ indices = np.compress(mask, graph.indices)
378
+ indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]
379
+
380
+ indices = indices.astype(np.intp, copy=no_filter_needed)
381
+
382
+ if return_distance:
383
+ neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))
384
+ neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))
385
+
386
+ if return_distance:
387
+ return neigh_dist, neigh_ind
388
+ else:
389
+ return neigh_ind
390
+
391
+
392
+ class NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
393
+ """Base class for nearest neighbors estimators."""
394
+
395
+ _parameter_constraints: dict = {
396
+ "n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
397
+ "radius": [Interval(Real, 0, None, closed="both"), None],
398
+ "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})],
399
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
400
+ "p": [Interval(Real, 0, None, closed="right"), None],
401
+ "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
402
+ "metric_params": [dict, None],
403
+ "n_jobs": [Integral, None],
404
+ }
405
+
406
+ @abstractmethod
407
+ def __init__(
408
+ self,
409
+ n_neighbors=None,
410
+ radius=None,
411
+ algorithm="auto",
412
+ leaf_size=30,
413
+ metric="minkowski",
414
+ p=2,
415
+ metric_params=None,
416
+ n_jobs=None,
417
+ ):
418
+ self.n_neighbors = n_neighbors
419
+ self.radius = radius
420
+ self.algorithm = algorithm
421
+ self.leaf_size = leaf_size
422
+ self.metric = metric
423
+ self.metric_params = metric_params
424
+ self.p = p
425
+ self.n_jobs = n_jobs
426
+
427
+ def _check_algorithm_metric(self):
428
+ if self.algorithm == "auto":
429
+ if self.metric == "precomputed":
430
+ alg_check = "brute"
431
+ elif (
432
+ callable(self.metric)
433
+ or self.metric in VALID_METRICS["ball_tree"]
434
+ or isinstance(self.metric, DistanceMetric)
435
+ ):
436
+ alg_check = "ball_tree"
437
+ else:
438
+ alg_check = "brute"
439
+ else:
440
+ alg_check = self.algorithm
441
+
442
+ if callable(self.metric):
443
+ if self.algorithm == "kd_tree":
444
+ # callable metric is only valid for brute force and ball_tree
445
+ raise ValueError(
446
+ "kd_tree does not support callable metric '%s'"
447
+ "Function call overhead will result"
448
+ "in very poor performance."
449
+ % self.metric
450
+ )
451
+ elif self.metric not in VALID_METRICS[alg_check] and not isinstance(
452
+ self.metric, DistanceMetric
453
+ ):
454
+ raise ValueError(
455
+ "Metric '%s' not valid. Use "
456
+ "sorted(sklearn.neighbors.VALID_METRICS['%s']) "
457
+ "to get valid options. "
458
+ "Metric can also be a callable function." % (self.metric, alg_check)
459
+ )
460
+
461
+ if self.metric_params is not None and "p" in self.metric_params:
462
+ if self.p is not None:
463
+ warnings.warn(
464
+ (
465
+ "Parameter p is found in metric_params. "
466
+ "The corresponding parameter from __init__ "
467
+ "is ignored."
468
+ ),
469
+ SyntaxWarning,
470
+ stacklevel=3,
471
+ )
472
+
473
+ def _fit(self, X, y=None):
474
+ if self._get_tags()["requires_y"]:
475
+ if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
476
+ X, y = self._validate_data(
477
+ X, y, accept_sparse="csr", multi_output=True, order="C"
478
+ )
479
+
480
+ if is_classifier(self):
481
+ # Classification targets require a specific format
482
+ if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
483
+ if y.ndim != 1:
484
+ warnings.warn(
485
+ (
486
+ "A column-vector y was passed when a "
487
+ "1d array was expected. Please change "
488
+ "the shape of y to (n_samples,), for "
489
+ "example using ravel()."
490
+ ),
491
+ DataConversionWarning,
492
+ stacklevel=2,
493
+ )
494
+
495
+ self.outputs_2d_ = False
496
+ y = y.reshape((-1, 1))
497
+ else:
498
+ self.outputs_2d_ = True
499
+
500
+ check_classification_targets(y)
501
+ self.classes_ = []
502
+ # Using `dtype=np.intp` is necessary since `np.bincount`
503
+ # (called in _classification.py) fails when dealing
504
+ # with a float64 array on 32bit systems.
505
+ self._y = np.empty(y.shape, dtype=np.intp)
506
+ for k in range(self._y.shape[1]):
507
+ classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
508
+ self.classes_.append(classes)
509
+
510
+ if not self.outputs_2d_:
511
+ self.classes_ = self.classes_[0]
512
+ self._y = self._y.ravel()
513
+ else:
514
+ self._y = y
515
+
516
+ else:
517
+ if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
518
+ X = self._validate_data(X, accept_sparse="csr", order="C")
519
+
520
+ self._check_algorithm_metric()
521
+ if self.metric_params is None:
522
+ self.effective_metric_params_ = {}
523
+ else:
524
+ self.effective_metric_params_ = self.metric_params.copy()
525
+
526
+ effective_p = self.effective_metric_params_.get("p", self.p)
527
+ if self.metric == "minkowski":
528
+ self.effective_metric_params_["p"] = effective_p
529
+
530
+ self.effective_metric_ = self.metric
531
+ # For minkowski distance, use more efficient methods where available
532
+ if self.metric == "minkowski":
533
+ p = self.effective_metric_params_.pop("p", 2)
534
+ w = self.effective_metric_params_.pop("w", None)
535
+
536
+ if p == 1 and w is None:
537
+ self.effective_metric_ = "manhattan"
538
+ elif p == 2 and w is None:
539
+ self.effective_metric_ = "euclidean"
540
+ elif p == np.inf and w is None:
541
+ self.effective_metric_ = "chebyshev"
542
+ else:
543
+ # Use the generic minkowski metric, possibly weighted.
544
+ self.effective_metric_params_["p"] = p
545
+ self.effective_metric_params_["w"] = w
546
+
547
+ if isinstance(X, NeighborsBase):
548
+ self._fit_X = X._fit_X
549
+ self._tree = X._tree
550
+ self._fit_method = X._fit_method
551
+ self.n_samples_fit_ = X.n_samples_fit_
552
+ return self
553
+
554
+ elif isinstance(X, BallTree):
555
+ self._fit_X = X.data
556
+ self._tree = X
557
+ self._fit_method = "ball_tree"
558
+ self.n_samples_fit_ = X.data.shape[0]
559
+ return self
560
+
561
+ elif isinstance(X, KDTree):
562
+ self._fit_X = X.data
563
+ self._tree = X
564
+ self._fit_method = "kd_tree"
565
+ self.n_samples_fit_ = X.data.shape[0]
566
+ return self
567
+
568
+ if self.metric == "precomputed":
569
+ X = _check_precomputed(X)
570
+ # Precomputed matrix X must be square
571
+ if X.shape[0] != X.shape[1]:
572
+ raise ValueError(
573
+ "Precomputed matrix must be square."
574
+ " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
575
+ )
576
+ self.n_features_in_ = X.shape[1]
577
+
578
+ n_samples = X.shape[0]
579
+ if n_samples == 0:
580
+ raise ValueError("n_samples must be greater than 0")
581
+
582
+ if issparse(X):
583
+ if self.algorithm not in ("auto", "brute"):
584
+ warnings.warn("cannot use tree with sparse input: using brute force")
585
+
586
+ if (
587
+ self.effective_metric_ not in VALID_METRICS_SPARSE["brute"]
588
+ and not callable(self.effective_metric_)
589
+ and not isinstance(self.effective_metric_, DistanceMetric)
590
+ ):
591
+ raise ValueError(
592
+ "Metric '%s' not valid for sparse input. "
593
+ "Use sorted(sklearn.neighbors."
594
+ "VALID_METRICS_SPARSE['brute']) "
595
+ "to get valid options. "
596
+ "Metric can also be a callable function." % (self.effective_metric_)
597
+ )
598
+ self._fit_X = X.copy()
599
+ self._tree = None
600
+ self._fit_method = "brute"
601
+ self.n_samples_fit_ = X.shape[0]
602
+ return self
603
+
604
+ self._fit_method = self.algorithm
605
+ self._fit_X = X
606
+ self.n_samples_fit_ = X.shape[0]
607
+
608
+ if self._fit_method == "auto":
609
+ # A tree approach is better for a small number of neighbors or a small
610
+ # number of features, with KDTree generally faster when available
611
+ if (
612
+ self.metric == "precomputed"
613
+ or self._fit_X.shape[1] > 15
614
+ or (
615
+ self.n_neighbors is not None
616
+ and self.n_neighbors >= self._fit_X.shape[0] // 2
617
+ )
618
+ ):
619
+ self._fit_method = "brute"
620
+ else:
621
+ if (
622
+ self.effective_metric_ == "minkowski"
623
+ and self.effective_metric_params_["p"] < 1
624
+ ):
625
+ self._fit_method = "brute"
626
+ elif (
627
+ self.effective_metric_ == "minkowski"
628
+ and self.effective_metric_params_.get("w") is not None
629
+ ):
630
+ # 'minkowski' with weights is not supported by KDTree but is
631
+ # supported by BallTree.
632
+ self._fit_method = "ball_tree"
633
+ elif self.effective_metric_ in VALID_METRICS["kd_tree"]:
634
+ self._fit_method = "kd_tree"
635
+ elif (
636
+ callable(self.effective_metric_)
637
+ or self.effective_metric_ in VALID_METRICS["ball_tree"]
638
+ ):
639
+ self._fit_method = "ball_tree"
640
+ else:
641
+ self._fit_method = "brute"
642
+
643
+ if (
644
+ self.effective_metric_ == "minkowski"
645
+ and self.effective_metric_params_["p"] < 1
646
+ ):
647
+ # For 0 < p < 1 Minkowski distances aren't valid distance
648
+ # metric as they do not satisfy triangular inequality:
649
+ # they are semi-metrics.
650
+ # algorithm="kd_tree" and algorithm="ball_tree" can't be used because
651
+ # KDTree and BallTree require a proper distance metric to work properly.
652
+ # However, the brute-force algorithm supports semi-metrics.
653
+ if self._fit_method == "brute":
654
+ warnings.warn(
655
+ "Mind that for 0 < p < 1, Minkowski metrics are not distance"
656
+ " metrics. Continuing the execution with `algorithm='brute'`."
657
+ )
658
+ else: # self._fit_method in ("kd_tree", "ball_tree")
659
+ raise ValueError(
660
+ f'algorithm="{self._fit_method}" does not support 0 < p < 1 for '
661
+ "the Minkowski metric. To resolve this problem either "
662
+ 'set p >= 1 or algorithm="brute".'
663
+ )
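A short sketch of the behaviour encoded in the branch above: for 0 < p < 1 the Minkowski "distance" is only a semi-metric, so the tree-based algorithms refuse it while the brute-force path proceeds with a warning; the array is arbitrary:

import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.random.RandomState(0).rand(10, 3)

try:
    NearestNeighbors(algorithm="kd_tree", metric="minkowski", p=0.5).fit(X)
except ValueError as exc:
    print(exc)  # kd_tree does not support 0 < p < 1

# Brute force accepts the semi-metric and emits a warning instead.
nn = NearestNeighbors(algorithm="brute", metric="minkowski", p=0.5).fit(X)
print(nn.kneighbors(n_neighbors=2, return_distance=False).shape)  # (10, 2)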
664
+
665
+ if self._fit_method == "ball_tree":
666
+ self._tree = BallTree(
667
+ X,
668
+ self.leaf_size,
669
+ metric=self.effective_metric_,
670
+ **self.effective_metric_params_,
671
+ )
672
+ elif self._fit_method == "kd_tree":
673
+ if (
674
+ self.effective_metric_ == "minkowski"
675
+ and self.effective_metric_params_.get("w") is not None
676
+ ):
677
+ raise ValueError(
678
+ "algorithm='kd_tree' is not valid for "
679
+ "metric='minkowski' with a weight parameter 'w': "
680
+ "try algorithm='ball_tree' "
681
+ "or algorithm='brute' instead."
682
+ )
683
+ self._tree = KDTree(
684
+ X,
685
+ self.leaf_size,
686
+ metric=self.effective_metric_,
687
+ **self.effective_metric_params_,
688
+ )
689
+ elif self._fit_method == "brute":
690
+ self._tree = None
691
+
692
+ return self
693
+
694
+ def _more_tags(self):
695
+ # For cross-validation routines to split data correctly
696
+ return {"pairwise": self.metric == "precomputed"}
697
+
698
+
699
+ def _tree_query_parallel_helper(tree, *args, **kwargs):
700
+ """Helper for the Parallel calls in KNeighborsMixin.kneighbors.
701
+
702
+ The Cython method tree.query is not directly picklable by cloudpickle
703
+ under PyPy.
704
+ """
705
+ return tree.query(*args, **kwargs)
706
+
707
+
708
+ class KNeighborsMixin:
709
+ """Mixin for k-neighbors searches."""
710
+
711
+ def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance):
712
+ """Reduce a chunk of distances to the nearest neighbors.
713
+
714
+ Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
715
+
716
+ Parameters
717
+ ----------
718
+ dist : ndarray of shape (n_samples_chunk, n_samples)
719
+ The distance matrix.
720
+
721
+ start : int
722
+ The index in X which the first row of dist corresponds to.
723
+
724
+ n_neighbors : int
725
+ Number of neighbors required for each sample.
726
+
727
+ return_distance : bool
728
+ Whether or not to return the distances.
729
+
730
+ Returns
731
+ -------
732
+ dist : array of shape (n_samples_chunk, n_neighbors)
733
+ Returned only if `return_distance=True`.
734
+
735
+ neigh : array of shape (n_samples_chunk, n_neighbors)
736
+ The neighbors indices.
737
+ """
738
+ sample_range = np.arange(dist.shape[0])[:, None]
739
+ neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
740
+ neigh_ind = neigh_ind[:, :n_neighbors]
741
+ # argpartition doesn't guarantee sorted order, so we sort again
742
+ neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])]
743
+ if return_distance:
744
+ if self.effective_metric_ == "euclidean":
745
+ result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
746
+ else:
747
+ result = dist[sample_range, neigh_ind], neigh_ind
748
+ else:
749
+ result = neigh_ind
750
+ return result
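A standalone NumPy illustration of the reduction above: `argpartition` gathers the k smallest distances per row without a full sort, and a second `argsort` over just those k columns restores ascending order; the distance matrix is made up:

import numpy as np

dist = np.array([[0.9, 0.1, 0.4, 0.3],
                 [0.2, 0.8, 0.05, 0.6]])
k = 2
rows = np.arange(dist.shape[0])[:, None]

neigh_ind = np.argpartition(dist, k - 1, axis=1)[:, :k]         # k smallest, unordered
neigh_ind = neigh_ind[rows, np.argsort(dist[rows, neigh_ind])]  # order those k by distance
print(neigh_ind)              # [[1 3] [2 0]]
print(dist[rows, neigh_ind])  # [[0.1 0.3] [0.05 0.2]]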
751
+
752
+ def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
753
+ """Find the K-neighbors of a point.
754
+
755
+ Returns indices of and distances to the neighbors of each point.
756
+
757
+ Parameters
758
+ ----------
759
+ X : {array-like, sparse matrix}, shape (n_queries, n_features), \
760
+ or (n_queries, n_indexed) if metric == 'precomputed', default=None
761
+ The query point or points.
762
+ If not provided, neighbors of each indexed point are returned.
763
+ In this case, the query point is not considered its own neighbor.
764
+
765
+ n_neighbors : int, default=None
766
+ Number of neighbors required for each sample. The default is the
767
+ value passed to the constructor.
768
+
769
+ return_distance : bool, default=True
770
+ Whether or not to return the distances.
771
+
772
+ Returns
773
+ -------
774
+ neigh_dist : ndarray of shape (n_queries, n_neighbors)
775
+ Array representing the lengths to points, only present if
776
+ return_distance=True.
777
+
778
+ neigh_ind : ndarray of shape (n_queries, n_neighbors)
779
+ Indices of the nearest points in the population matrix.
780
+
781
+ Examples
782
+ --------
783
+ In the following example, we construct a NearestNeighbors
784
+ class from an array representing our data set and ask who's
785
+ the closest point to [1,1,1]
786
+
787
+ >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
788
+ >>> from sklearn.neighbors import NearestNeighbors
789
+ >>> neigh = NearestNeighbors(n_neighbors=1)
790
+ >>> neigh.fit(samples)
791
+ NearestNeighbors(n_neighbors=1)
792
+ >>> print(neigh.kneighbors([[1., 1., 1.]]))
793
+ (array([[0.5]]), array([[2]]))
794
+
795
+ As you can see, it returns [[0.5]], and [[2]], which means that the
796
+ element is at distance 0.5 and is the third element of samples
797
+ (indexes start at 0). You can also query for multiple points:
798
+
799
+ >>> X = [[0., 1., 0.], [1., 0., 1.]]
800
+ >>> neigh.kneighbors(X, return_distance=False)
801
+ array([[1],
802
+ [2]]...)
803
+ """
804
+ check_is_fitted(self)
805
+
806
+ if n_neighbors is None:
807
+ n_neighbors = self.n_neighbors
808
+ elif n_neighbors <= 0:
809
+ raise ValueError("Expected n_neighbors > 0. Got %d" % n_neighbors)
810
+ elif not isinstance(n_neighbors, numbers.Integral):
811
+ raise TypeError(
812
+ "n_neighbors does not take %s value, enter integer value"
813
+ % type(n_neighbors)
814
+ )
815
+
816
+ query_is_train = X is None
817
+ if query_is_train:
818
+ X = self._fit_X
819
+ # Include an extra neighbor to account for the sample itself being
820
+ # returned, which is removed later
821
+ n_neighbors += 1
822
+ else:
823
+ if self.metric == "precomputed":
824
+ X = _check_precomputed(X)
825
+ else:
826
+ X = self._validate_data(X, accept_sparse="csr", reset=False, order="C")
827
+
828
+ n_samples_fit = self.n_samples_fit_
829
+ if n_neighbors > n_samples_fit:
830
+ if query_is_train:
831
+ n_neighbors -= 1 # ok to modify inplace because an error is raised
832
+ inequality_str = "n_neighbors < n_samples_fit"
833
+ else:
834
+ inequality_str = "n_neighbors <= n_samples_fit"
835
+ raise ValueError(
836
+ f"Expected {inequality_str}, but "
837
+ f"n_neighbors = {n_neighbors}, n_samples_fit = {n_samples_fit}, "
838
+ f"n_samples = {X.shape[0]}" # include n_samples for common tests
839
+ )
840
+
841
+ n_jobs = effective_n_jobs(self.n_jobs)
842
+ chunked_results = None
843
+ use_pairwise_distances_reductions = (
844
+ self._fit_method == "brute"
845
+ and ArgKmin.is_usable_for(
846
+ X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
847
+ )
848
+ )
849
+ if use_pairwise_distances_reductions:
850
+ results = ArgKmin.compute(
851
+ X=X,
852
+ Y=self._fit_X,
853
+ k=n_neighbors,
854
+ metric=self.effective_metric_,
855
+ metric_kwargs=self.effective_metric_params_,
856
+ strategy="auto",
857
+ return_distance=return_distance,
858
+ )
859
+
860
+ elif (
861
+ self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
862
+ ):
863
+ results = _kneighbors_from_graph(
864
+ X, n_neighbors=n_neighbors, return_distance=return_distance
865
+ )
866
+
867
+ elif self._fit_method == "brute":
868
+ # Joblib-based backend, which is used when a user-defined callable
869
+ # is passed for the metric.
870
+
871
+ # This won't be used in the future once PairwiseDistancesReductions
872
+ # support:
873
+ # - DistanceMetrics which work on supposedly binary data
874
+ # - CSR-dense and dense-CSR case if 'euclidean' in metric.
875
+ reduce_func = partial(
876
+ self._kneighbors_reduce_func,
877
+ n_neighbors=n_neighbors,
878
+ return_distance=return_distance,
879
+ )
880
+
881
+ # for efficiency, use squared euclidean distances
882
+ if self.effective_metric_ == "euclidean":
883
+ kwds = {"squared": True}
884
+ else:
885
+ kwds = self.effective_metric_params_
886
+
887
+ chunked_results = list(
888
+ pairwise_distances_chunked(
889
+ X,
890
+ self._fit_X,
891
+ reduce_func=reduce_func,
892
+ metric=self.effective_metric_,
893
+ n_jobs=n_jobs,
894
+ **kwds,
895
+ )
896
+ )
897
+
898
+ elif self._fit_method in ["ball_tree", "kd_tree"]:
899
+ if issparse(X):
900
+ raise ValueError(
901
+ "%s does not work with sparse matrices. Densify the data, "
902
+ "or set algorithm='brute'"
903
+ % self._fit_method
904
+ )
905
+ chunked_results = Parallel(n_jobs, prefer="threads")(
906
+ delayed(_tree_query_parallel_helper)(
907
+ self._tree, X[s], n_neighbors, return_distance
908
+ )
909
+ for s in gen_even_slices(X.shape[0], n_jobs)
910
+ )
911
+ else:
912
+ raise ValueError("internal: _fit_method not recognized")
913
+
914
+ if chunked_results is not None:
915
+ if return_distance:
916
+ neigh_dist, neigh_ind = zip(*chunked_results)
917
+ results = np.vstack(neigh_dist), np.vstack(neigh_ind)
918
+ else:
919
+ results = np.vstack(chunked_results)
920
+
921
+ if not query_is_train:
922
+ return results
923
+ else:
924
+ # If the query data is the same as the indexed data, we would like
925
+ # to ignore the first nearest neighbor of every sample, i.e
926
+ # the sample itself.
927
+ if return_distance:
928
+ neigh_dist, neigh_ind = results
929
+ else:
930
+ neigh_ind = results
931
+
932
+ n_queries, _ = X.shape
933
+ sample_range = np.arange(n_queries)[:, None]
934
+ sample_mask = neigh_ind != sample_range
935
+
936
+ # Corner case: When the number of duplicates are more
937
+ # than the number of neighbors, the first NN will not
938
+ # be the sample, but a duplicate.
939
+ # In that case mask the first duplicate.
940
+ dup_gr_nbrs = np.all(sample_mask, axis=1)
941
+ sample_mask[:, 0][dup_gr_nbrs] = False
942
+ neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))
943
+
944
+ if return_distance:
945
+ neigh_dist = np.reshape(
946
+ neigh_dist[sample_mask], (n_queries, n_neighbors - 1)
947
+ )
948
+ return neigh_dist, neigh_ind
949
+ return neigh_ind
950
+
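The self-exclusion behaviour implemented above (querying with `X=None` drops each sample from its own neighbourhood) can be seen in a minimal sketch; this is an illustrative usage example, not part of the diffed file:

    import numpy as np
    from sklearn.neighbors import NearestNeighbors

    samples = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    nn = NearestNeighbors(n_neighbors=1).fit(samples)

    # X=None: each point is removed from its own result, so its closest
    # *other* point is returned instead.
    print(nn.kneighbors(return_distance=False))            # e.g. [[1] [0] [1]]
    # Explicit query with the training points: each point is its own neighbor.
    print(nn.kneighbors(samples, return_distance=False))   # [[0] [1] [2]]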
951
+ def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
952
+ """Compute the (weighted) graph of k-Neighbors for points in X.
953
+
954
+ Parameters
955
+ ----------
956
+ X : {array-like, sparse matrix} of shape (n_queries, n_features), \
957
+ or (n_queries, n_indexed) if metric == 'precomputed', default=None
958
+ The query point or points.
959
+ If not provided, neighbors of each indexed point are returned.
960
+ In this case, the query point is not considered its own neighbor.
961
+ For ``metric='precomputed'`` the shape should be
962
+ (n_queries, n_indexed). Otherwise the shape should be
963
+ (n_queries, n_features).
964
+
965
+ n_neighbors : int, default=None
966
+ Number of neighbors for each sample. The default is the value
967
+ passed to the constructor.
968
+
969
+ mode : {'connectivity', 'distance'}, default='connectivity'
970
+ Type of returned matrix: 'connectivity' will return the
971
+ connectivity matrix with ones and zeros, in 'distance' the
972
+ edges are distances between points, type of distance
973
+ depends on the selected metric parameter in
974
+ NearestNeighbors class.
975
+
976
+ Returns
977
+ -------
978
+ A : sparse-matrix of shape (n_queries, n_samples_fit)
979
+ `n_samples_fit` is the number of samples in the fitted data.
980
+ `A[i, j]` gives the weight of the edge connecting `i` to `j`.
981
+ The matrix is of CSR format.
982
+
983
+ See Also
984
+ --------
985
+ NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph
986
+ of Neighbors for points in X.
987
+
988
+ Examples
989
+ --------
990
+ >>> X = [[0], [3], [1]]
991
+ >>> from sklearn.neighbors import NearestNeighbors
992
+ >>> neigh = NearestNeighbors(n_neighbors=2)
993
+ >>> neigh.fit(X)
994
+ NearestNeighbors(n_neighbors=2)
995
+ >>> A = neigh.kneighbors_graph(X)
996
+ >>> A.toarray()
997
+ array([[1., 0., 1.],
998
+ [0., 1., 1.],
999
+ [1., 0., 1.]])
1000
+ """
1001
+ check_is_fitted(self)
1002
+ if n_neighbors is None:
1003
+ n_neighbors = self.n_neighbors
1004
+
1005
+ # check the input only in self.kneighbors
1006
+
1007
+ # construct CSR matrix representation of the k-NN graph
1008
+ if mode == "connectivity":
1009
+ A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
1010
+ n_queries = A_ind.shape[0]
1011
+ A_data = np.ones(n_queries * n_neighbors)
1012
+
1013
+ elif mode == "distance":
1014
+ A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
1015
+ A_data = np.ravel(A_data)
1016
+
1017
+ else:
1018
+ raise ValueError(
1019
+ 'Unsupported mode, must be one of "connectivity", '
1020
+ f'or "distance" but got "{mode}" instead'
1021
+ )
1022
+
1023
+ n_queries = A_ind.shape[0]
1024
+ n_samples_fit = self.n_samples_fit_
1025
+ n_nonzero = n_queries * n_neighbors
1026
+ A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
1027
+
1028
+ kneighbors_graph = csr_matrix(
1029
+ (A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)
1030
+ )
1031
+
1032
+ return kneighbors_graph
1033
+
1034
+
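A brief sketch of the two `mode` options of `kneighbors_graph` on the toy data from the docstring above (illustrative only):

    from sklearn.neighbors import NearestNeighbors

    X = [[0], [3], [1]]
    neigh = NearestNeighbors(n_neighbors=2).fit(X)

    # 'connectivity': binary adjacency, one 1.0 per (query, neighbor) pair.
    conn = neigh.kneighbors_graph(X, mode="connectivity")
    # 'distance': same sparsity pattern, but entries hold the metric distances.
    dist = neigh.kneighbors_graph(X, mode="distance")
    print(conn.toarray())
    print(dist.toarray())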
1035
+ def _tree_query_radius_parallel_helper(tree, *args, **kwargs):
1036
+ """Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors.
1037
+
1038
+ The Cython method tree.query_radius is not directly picklable by
1039
+ cloudpickle under PyPy.
1040
+ """
1041
+ return tree.query_radius(*args, **kwargs)
1042
+
1043
+
1044
+ class RadiusNeighborsMixin:
1045
+ """Mixin for radius-based neighbors searches."""
1046
+
1047
+ def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):
1048
+ """Reduce a chunk of distances to the nearest neighbors.
1049
+
1050
+ Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
1051
+
1052
+ Parameters
1053
+ ----------
1054
+ dist : ndarray of shape (n_samples_chunk, n_samples)
1055
+ The distance matrix.
1056
+
1057
+ start : int
1058
+ The index in X which the first row of dist corresponds to.
1059
+
1060
+ radius : float
1061
+ The radius considered when making the nearest neighbors search.
1062
+
1063
+ return_distance : bool
1064
+ Whether or not to return the distances.
1065
+
1066
+ Returns
1067
+ -------
1068
+ dist : list of ndarray of shape (n_samples_chunk,)
1069
+ Returned only if `return_distance=True`.
1070
+
1071
+ neigh : list of ndarray of shape (n_samples_chunk,)
1072
+ The neighbors indices.
1073
+ """
1074
+ neigh_ind = [np.where(d <= radius)[0] for d in dist]
1075
+
1076
+ if return_distance:
1077
+ if self.effective_metric_ == "euclidean":
1078
+ dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]
1079
+ else:
1080
+ dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]
1081
+ results = dist, neigh_ind
1082
+ else:
1083
+ results = neigh_ind
1084
+ return results
1085
+
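Conceptually, the reduction above keeps, for each row of a distance chunk, the column indices whose distance does not exceed the radius. A standalone NumPy sketch of that step (the names here are illustrative, not part of the class):

    import numpy as np

    dist_chunk = np.array([[0.2, 1.7, 0.9],
                           [2.0, 0.4, 1.1]])
    radius = 1.0
    neigh_ind = [np.where(row <= radius)[0] for row in dist_chunk]
    # -> [array([0, 2]), array([1])]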
1086
+ def radius_neighbors(
1087
+ self, X=None, radius=None, return_distance=True, sort_results=False
1088
+ ):
1089
+ """Find the neighbors within a given radius of a point or points.
1090
+
1091
+ Return the indices and distances of each point from the dataset
1092
+ lying in a ball with size ``radius`` around the points of the query
1093
+ array. Points lying on the boundary are included in the results.
1094
+
1095
+ The result points are *not* necessarily sorted by distance to their
1096
+ query point.
1097
+
1098
+ Parameters
1099
+ ----------
1100
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
1101
+ The query point or points.
1102
+ If not provided, neighbors of each indexed point are returned.
1103
+ In this case, the query point is not considered its own neighbor.
1104
+
1105
+ radius : float, default=None
1106
+ Limiting distance of neighbors to return. The default is the value
1107
+ passed to the constructor.
1108
+
1109
+ return_distance : bool, default=True
1110
+ Whether or not to return the distances.
1111
+
1112
+ sort_results : bool, default=False
1113
+ If True, the distances and indices will be sorted by increasing
1114
+ distances before being returned. If False, the results may not
1115
+ be sorted. If `return_distance=False`, setting `sort_results=True`
1116
+ will result in an error.
1117
+
1118
+ .. versionadded:: 0.22
1119
+
1120
+ Returns
1121
+ -------
1122
+ neigh_dist : ndarray of shape (n_samples,) of arrays
1123
+ Array representing the distances to each point, only present if
1124
+ `return_distance=True`. The distance values are computed according
1125
+ to the ``metric`` constructor parameter.
1126
+
1127
+ neigh_ind : ndarray of shape (n_samples,) of arrays
1128
+ An array of arrays of indices of the approximate nearest points
1129
+ from the population matrix that lie within a ball of size
1130
+ ``radius`` around the query points.
1131
+
1132
+ Notes
1133
+ -----
1134
+ Because the number of neighbors of each point is not necessarily
1135
+ equal, the results for multiple query points cannot be fit in a
1136
+ standard data array.
1137
+ For efficiency, `radius_neighbors` returns arrays of objects, where
1138
+ each object is a 1D array of indices or distances.
1139
+
1140
+ Examples
1141
+ --------
1142
+ In the following example, we construct a NearestNeighbors
1143
+ estimator from an array representing our data set and ask which
1144
+ points are within distance 1.6 of [1, 1, 1]:
1145
+
1146
+ >>> import numpy as np
1147
+ >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
1148
+ >>> from sklearn.neighbors import NearestNeighbors
1149
+ >>> neigh = NearestNeighbors(radius=1.6)
1150
+ >>> neigh.fit(samples)
1151
+ NearestNeighbors(radius=1.6)
1152
+ >>> rng = neigh.radius_neighbors([[1., 1., 1.]])
1153
+ >>> print(np.asarray(rng[0][0]))
1154
+ [1.5 0.5]
1155
+ >>> print(np.asarray(rng[1][0]))
1156
+ [1 2]
1157
+
1158
+ The first array returned contains the distances to all points which
1159
+ are closer than 1.6, while the second array returned contains their
1160
+ indices. In general, multiple points can be queried at the same time.
1161
+ """
1162
+ check_is_fitted(self)
1163
+
1164
+ if sort_results and not return_distance:
1165
+ raise ValueError("return_distance must be True if sort_results is True.")
1166
+
1167
+ query_is_train = X is None
1168
+ if query_is_train:
1169
+ X = self._fit_X
1170
+ else:
1171
+ if self.metric == "precomputed":
1172
+ X = _check_precomputed(X)
1173
+ else:
1174
+ X = self._validate_data(X, accept_sparse="csr", reset=False, order="C")
1175
+
1176
+ if radius is None:
1177
+ radius = self.radius
1178
+
1179
+ use_pairwise_distances_reductions = (
1180
+ self._fit_method == "brute"
1181
+ and RadiusNeighbors.is_usable_for(
1182
+ X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
1183
+ )
1184
+ )
1185
+
1186
+ if use_pairwise_distances_reductions:
1187
+ results = RadiusNeighbors.compute(
1188
+ X=X,
1189
+ Y=self._fit_X,
1190
+ radius=radius,
1191
+ metric=self.effective_metric_,
1192
+ metric_kwargs=self.effective_metric_params_,
1193
+ strategy="auto",
1194
+ return_distance=return_distance,
1195
+ sort_results=sort_results,
1196
+ )
1197
+
1198
+ elif (
1199
+ self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
1200
+ ):
1201
+ results = _radius_neighbors_from_graph(
1202
+ X, radius=radius, return_distance=return_distance
1203
+ )
1204
+
1205
+ elif self._fit_method == "brute":
1206
+ # Joblib-based backend, which is used when a user-defined callable
1208
+ # is passed as the metric.
1208
+
1209
+ # This won't be used in the future once PairwiseDistancesReductions
1210
+ # support:
1211
+ # - DistanceMetrics which work on supposedly binary data
1212
+ # - CSR-dense and dense-CSR case if 'euclidean' in metric.
1213
+
1214
+ # for efficiency, use squared euclidean distances
1215
+ if self.effective_metric_ == "euclidean":
1216
+ radius *= radius
1217
+ kwds = {"squared": True}
1218
+ else:
1219
+ kwds = self.effective_metric_params_
1220
+
1221
+ reduce_func = partial(
1222
+ self._radius_neighbors_reduce_func,
1223
+ radius=radius,
1224
+ return_distance=return_distance,
1225
+ )
1226
+
1227
+ chunked_results = pairwise_distances_chunked(
1228
+ X,
1229
+ self._fit_X,
1230
+ reduce_func=reduce_func,
1231
+ metric=self.effective_metric_,
1232
+ n_jobs=self.n_jobs,
1233
+ **kwds,
1234
+ )
1235
+ if return_distance:
1236
+ neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results)
1237
+ neigh_dist_list = sum(neigh_dist_chunks, [])
1238
+ neigh_ind_list = sum(neigh_ind_chunks, [])
1239
+ neigh_dist = _to_object_array(neigh_dist_list)
1240
+ neigh_ind = _to_object_array(neigh_ind_list)
1241
+ results = neigh_dist, neigh_ind
1242
+ else:
1243
+ neigh_ind_list = sum(chunked_results, [])
1244
+ results = _to_object_array(neigh_ind_list)
1245
+
1246
+ if sort_results:
1247
+ for ii in range(len(neigh_dist)):
1248
+ order = np.argsort(neigh_dist[ii], kind="mergesort")
1249
+ neigh_ind[ii] = neigh_ind[ii][order]
1250
+ neigh_dist[ii] = neigh_dist[ii][order]
1251
+ results = neigh_dist, neigh_ind
1252
+
1253
+ elif self._fit_method in ["ball_tree", "kd_tree"]:
1254
+ if issparse(X):
1255
+ raise ValueError(
1256
+ "%s does not work with sparse matrices. Densify the data, "
1257
+ "or set algorithm='brute'"
1258
+ % self._fit_method
1259
+ )
1260
+
1261
+ n_jobs = effective_n_jobs(self.n_jobs)
1262
+ delayed_query = delayed(_tree_query_radius_parallel_helper)
1263
+ chunked_results = Parallel(n_jobs, prefer="threads")(
1264
+ delayed_query(
1265
+ self._tree, X[s], radius, return_distance, sort_results=sort_results
1266
+ )
1267
+ for s in gen_even_slices(X.shape[0], n_jobs)
1268
+ )
1269
+ if return_distance:
1270
+ neigh_ind, neigh_dist = tuple(zip(*chunked_results))
1271
+ results = np.hstack(neigh_dist), np.hstack(neigh_ind)
1272
+ else:
1273
+ results = np.hstack(chunked_results)
1274
+ else:
1275
+ raise ValueError("internal: _fit_method not recognized")
1276
+
1277
+ if not query_is_train:
1278
+ return results
1279
+ else:
1280
+ # If the query data is the same as the indexed data, we would like
1281
+ # to ignore the first nearest neighbor of every sample, i.e
1282
+ # the sample itself.
1283
+ if return_distance:
1284
+ neigh_dist, neigh_ind = results
1285
+ else:
1286
+ neigh_ind = results
1287
+
1288
+ for ind, ind_neighbor in enumerate(neigh_ind):
1289
+ mask = ind_neighbor != ind
1290
+
1291
+ neigh_ind[ind] = ind_neighbor[mask]
1292
+ if return_distance:
1293
+ neigh_dist[ind] = neigh_dist[ind][mask]
1294
+
1295
+ if return_distance:
1296
+ return neigh_dist, neigh_ind
1297
+ return neigh_ind
1298
+
1299
+ def radius_neighbors_graph(
1300
+ self, X=None, radius=None, mode="connectivity", sort_results=False
1301
+ ):
1302
+ """Compute the (weighted) graph of Neighbors for points in X.
1303
+
1304
+ Neighborhoods are restricted to the points at a distance lower than
1305
+ radius.
1306
+
1307
+ Parameters
1308
+ ----------
1309
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
1310
+ The query point or points.
1311
+ If not provided, neighbors of each indexed point are returned.
1312
+ In this case, the query point is not considered its own neighbor.
1313
+
1314
+ radius : float, default=None
1315
+ Radius of neighborhoods. The default is the value passed to the
1316
+ constructor.
1317
+
1318
+ mode : {'connectivity', 'distance'}, default='connectivity'
1319
+ Type of returned matrix: 'connectivity' will return the
1320
+ connectivity matrix with ones and zeros, in 'distance' the
1321
+ edges are distances between points, type of distance
1322
+ depends on the selected metric parameter in
1323
+ NearestNeighbors class.
1324
+
1325
+ sort_results : bool, default=False
1326
+ If True, in each row of the result, the non-zero entries will be
1327
+ sorted by increasing distances. If False, the non-zero entries may
1328
+ not be sorted. Only used with mode='distance'.
1329
+
1330
+ .. versionadded:: 0.22
1331
+
1332
+ Returns
1333
+ -------
1334
+ A : sparse-matrix of shape (n_queries, n_samples_fit)
1335
+ `n_samples_fit` is the number of samples in the fitted data.
1336
+ `A[i, j]` gives the weight of the edge connecting `i` to `j`.
1337
+ The matrix is of CSR format.
1338
+
1339
+ See Also
1340
+ --------
1341
+ kneighbors_graph : Compute the (weighted) graph of k-Neighbors for
1342
+ points in X.
1343
+
1344
+ Examples
1345
+ --------
1346
+ >>> X = [[0], [3], [1]]
1347
+ >>> from sklearn.neighbors import NearestNeighbors
1348
+ >>> neigh = NearestNeighbors(radius=1.5)
1349
+ >>> neigh.fit(X)
1350
+ NearestNeighbors(radius=1.5)
1351
+ >>> A = neigh.radius_neighbors_graph(X)
1352
+ >>> A.toarray()
1353
+ array([[1., 0., 1.],
1354
+ [0., 1., 0.],
1355
+ [1., 0., 1.]])
1356
+ """
1357
+ check_is_fitted(self)
1358
+
1359
+ # check the input only in self.radius_neighbors
1360
+
1361
+ if radius is None:
1362
+ radius = self.radius
1363
+
1364
+ # construct CSR matrix representation of the NN graph
1365
+ if mode == "connectivity":
1366
+ A_ind = self.radius_neighbors(X, radius, return_distance=False)
1367
+ A_data = None
1368
+ elif mode == "distance":
1369
+ dist, A_ind = self.radius_neighbors(
1370
+ X, radius, return_distance=True, sort_results=sort_results
1371
+ )
1372
+ A_data = np.concatenate(list(dist))
1373
+ else:
1374
+ raise ValueError(
1375
+ 'Unsupported mode, must be one of "connectivity", '
1376
+ f'or "distance" but got "{mode}" instead'
1377
+ )
1378
+
1379
+ n_queries = A_ind.shape[0]
1380
+ n_samples_fit = self.n_samples_fit_
1381
+ n_neighbors = np.array([len(a) for a in A_ind])
1382
+ A_ind = np.concatenate(list(A_ind))
1383
+ if A_data is None:
1384
+ A_data = np.ones(len(A_ind))
1385
+ A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
1386
+
1387
+ return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))
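Because neighborhoods can have different sizes, `radius_neighbors` returns object arrays holding one index array per query; a minimal sketch (illustrative values only):

    from sklearn.neighbors import NearestNeighbors

    X = [[0.0], [1.0], [5.0]]
    nn = NearestNeighbors(radius=1.5).fit(X)
    dist, ind = nn.radius_neighbors([[0.5], [5.0]])

    # Ragged results: the first query has two neighbors, the second only one.
    print([list(i) for i in ind])   # e.g. [[0, 1], [2]]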
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_classification.py ADDED
@@ -0,0 +1,839 @@
1
+ """Nearest Neighbor Classification"""
2
+
3
+ # Authors: Jake Vanderplas <[email protected]>
4
+ # Fabian Pedregosa <[email protected]>
5
+ # Alexandre Gramfort <[email protected]>
6
+ # Sparseness support by Lars Buitinck
7
+ # Multi-output support by Arnaud Joly <[email protected]>
8
+ #
9
+ # License: BSD 3 clause (C) INRIA, University of Amsterdam
10
+ import warnings
11
+ from numbers import Integral
12
+
13
+ import numpy as np
14
+
15
+ from sklearn.neighbors._base import _check_precomputed
16
+
17
+ from ..base import ClassifierMixin, _fit_context
18
+ from ..metrics._pairwise_distances_reduction import (
19
+ ArgKminClassMode,
20
+ RadiusNeighborsClassMode,
21
+ )
22
+ from ..utils._param_validation import StrOptions
23
+ from ..utils.arrayfuncs import _all_with_any_reduction_axis_1
24
+ from ..utils.extmath import weighted_mode
25
+ from ..utils.fixes import _mode
26
+ from ..utils.validation import _is_arraylike, _num_samples, check_is_fitted
27
+ from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights
28
+
29
+
30
+ def _adjusted_metric(metric, metric_kwargs, p=None):
31
+ metric_kwargs = metric_kwargs or {}
32
+ if metric == "minkowski":
33
+ metric_kwargs["p"] = p
34
+ if p == 2:
35
+ metric = "euclidean"
36
+ return metric, metric_kwargs
37
+
38
+
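The helper above folds `p` into the metric keyword arguments and rewrites the common `minkowski` with `p=2` case as plain `"euclidean"`. A quick illustration of its input/output behaviour (the helper is private; shown only for clarity):

    # _adjusted_metric("minkowski", None, p=2)  -> ("euclidean", {"p": 2})
    # _adjusted_metric("minkowski", None, p=1)  -> ("minkowski", {"p": 1})
    # _adjusted_metric("cosine", {"w": None})   -> ("cosine", {"w": None})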
39
+ class KNeighborsClassifier(KNeighborsMixin, ClassifierMixin, NeighborsBase):
40
+ """Classifier implementing the k-nearest neighbors vote.
41
+
42
+ Read more in the :ref:`User Guide <classification>`.
43
+
44
+ Parameters
45
+ ----------
46
+ n_neighbors : int, default=5
47
+ Number of neighbors to use by default for :meth:`kneighbors` queries.
48
+
49
+ weights : {'uniform', 'distance'}, callable or None, default='uniform'
50
+ Weight function used in prediction. Possible values:
51
+
52
+ - 'uniform' : uniform weights. All points in each neighborhood
53
+ are weighted equally.
54
+ - 'distance' : weight points by the inverse of their distance.
55
+ in this case, closer neighbors of a query point will have a
56
+ greater influence than neighbors which are further away.
57
+ - [callable] : a user-defined function which accepts an
58
+ array of distances, and returns an array of the same shape
59
+ containing the weights.
60
+
61
+ Refer to the example entitled
62
+ :ref:`sphx_glr_auto_examples_neighbors_plot_classification.py`
63
+ showing the impact of the `weights` parameter on the decision
64
+ boundary.
65
+
66
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
67
+ Algorithm used to compute the nearest neighbors:
68
+
69
+ - 'ball_tree' will use :class:`BallTree`
70
+ - 'kd_tree' will use :class:`KDTree`
71
+ - 'brute' will use a brute-force search.
72
+ - 'auto' will attempt to decide the most appropriate algorithm
73
+ based on the values passed to :meth:`fit` method.
74
+
75
+ Note: fitting on sparse input will override the setting of
76
+ this parameter, using brute force.
77
+
78
+ leaf_size : int, default=30
79
+ Leaf size passed to BallTree or KDTree. This can affect the
80
+ speed of the construction and query, as well as the memory
81
+ required to store the tree. The optimal value depends on the
82
+ nature of the problem.
83
+
84
+ p : float, default=2
85
+ Power parameter for the Minkowski metric. When p = 1, this is equivalent
86
+ to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
87
+ For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
88
+ to be positive.
89
+
90
+ metric : str or callable, default='minkowski'
91
+ Metric to use for distance computation. Default is "minkowski", which
92
+ results in the standard Euclidean distance when p = 2. See the
93
+ documentation of `scipy.spatial.distance
94
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
95
+ the metrics listed in
96
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
97
+ values.
98
+
99
+ If metric is "precomputed", X is assumed to be a distance matrix and
100
+ must be square during fit. X may be a :term:`sparse graph`, in which
101
+ case only "nonzero" elements may be considered neighbors.
102
+
103
+ If metric is a callable function, it takes two arrays representing 1D
104
+ vectors as inputs and must return one value indicating the distance
105
+ between those vectors. This works for Scipy's metrics, but is less
106
+ efficient than passing the metric name as a string.
107
+
108
+ metric_params : dict, default=None
109
+ Additional keyword arguments for the metric function.
110
+
111
+ n_jobs : int, default=None
112
+ The number of parallel jobs to run for neighbors search.
113
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
114
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
115
+ for more details.
116
+ Doesn't affect :meth:`fit` method.
117
+
118
+ Attributes
119
+ ----------
120
+ classes_ : array of shape (n_classes,)
121
+ Class labels known to the classifier
122
+
123
+ effective_metric_ : str or callable
124
+ The distance metric used. It will be same as the `metric` parameter
125
+ or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
126
+ 'minkowski' and `p` parameter set to 2.
127
+
128
+ effective_metric_params_ : dict
129
+ Additional keyword arguments for the metric function. For most metrics
130
+ will be same with `metric_params` parameter, but may also contain the
131
+ `p` parameter value if the `effective_metric_` attribute is set to
132
+ 'minkowski'.
133
+
134
+ n_features_in_ : int
135
+ Number of features seen during :term:`fit`.
136
+
137
+ .. versionadded:: 0.24
138
+
139
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
140
+ Names of features seen during :term:`fit`. Defined only when `X`
141
+ has feature names that are all strings.
142
+
143
+ .. versionadded:: 1.0
144
+
145
+ n_samples_fit_ : int
146
+ Number of samples in the fitted data.
147
+
148
+ outputs_2d_ : bool
149
+ False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
150
+ otherwise True.
151
+
152
+ See Also
153
+ --------
154
+ RadiusNeighborsClassifier: Classifier based on neighbors within a fixed radius.
155
+ KNeighborsRegressor: Regression based on k-nearest neighbors.
156
+ RadiusNeighborsRegressor: Regression based on neighbors within a fixed radius.
157
+ NearestNeighbors: Unsupervised learner for implementing neighbor searches.
158
+
159
+ Notes
160
+ -----
161
+ See :ref:`Nearest Neighbors <neighbors>` in the online documentation
162
+ for a discussion of the choice of ``algorithm`` and ``leaf_size``.
163
+
164
+ .. warning::
165
+
166
+ Regarding the Nearest Neighbors algorithms, if it is found that two
167
+ neighbors, neighbor `k+1` and `k`, have identical distances
168
+ but different labels, the results will depend on the ordering of the
169
+ training data.
170
+
171
+ https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
172
+
173
+ Examples
174
+ --------
175
+ >>> X = [[0], [1], [2], [3]]
176
+ >>> y = [0, 0, 1, 1]
177
+ >>> from sklearn.neighbors import KNeighborsClassifier
178
+ >>> neigh = KNeighborsClassifier(n_neighbors=3)
179
+ >>> neigh.fit(X, y)
180
+ KNeighborsClassifier(...)
181
+ >>> print(neigh.predict([[1.1]]))
182
+ [0]
183
+ >>> print(neigh.predict_proba([[0.9]]))
184
+ [[0.666... 0.333...]]
185
+ """
186
+
187
+ _parameter_constraints: dict = {**NeighborsBase._parameter_constraints}
188
+ _parameter_constraints.pop("radius")
189
+ _parameter_constraints.update(
190
+ {"weights": [StrOptions({"uniform", "distance"}), callable, None]}
191
+ )
192
+
193
+ def __init__(
194
+ self,
195
+ n_neighbors=5,
196
+ *,
197
+ weights="uniform",
198
+ algorithm="auto",
199
+ leaf_size=30,
200
+ p=2,
201
+ metric="minkowski",
202
+ metric_params=None,
203
+ n_jobs=None,
204
+ ):
205
+ super().__init__(
206
+ n_neighbors=n_neighbors,
207
+ algorithm=algorithm,
208
+ leaf_size=leaf_size,
209
+ metric=metric,
210
+ p=p,
211
+ metric_params=metric_params,
212
+ n_jobs=n_jobs,
213
+ )
214
+ self.weights = weights
215
+
216
+ @_fit_context(
217
+ # KNeighborsClassifier.metric is not validated yet
218
+ prefer_skip_nested_validation=False
219
+ )
220
+ def fit(self, X, y):
221
+ """Fit the k-nearest neighbors classifier from the training dataset.
222
+
223
+ Parameters
224
+ ----------
225
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
226
+ (n_samples, n_samples) if metric='precomputed'
227
+ Training data.
228
+
229
+ y : {array-like, sparse matrix} of shape (n_samples,) or \
230
+ (n_samples, n_outputs)
231
+ Target values.
232
+
233
+ Returns
234
+ -------
235
+ self : KNeighborsClassifier
236
+ The fitted k-nearest neighbors classifier.
237
+ """
238
+ return self._fit(X, y)
239
+
240
+ def predict(self, X):
241
+ """Predict the class labels for the provided data.
242
+
243
+ Parameters
244
+ ----------
245
+ X : {array-like, sparse matrix} of shape (n_queries, n_features), \
246
+ or (n_queries, n_indexed) if metric == 'precomputed'
247
+ Test samples.
248
+
249
+ Returns
250
+ -------
251
+ y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
252
+ Class labels for each data sample.
253
+ """
254
+ check_is_fitted(self, "_fit_method")
255
+ if self.weights == "uniform":
256
+ if self._fit_method == "brute" and ArgKminClassMode.is_usable_for(
257
+ X, self._fit_X, self.metric
258
+ ):
259
+ probabilities = self.predict_proba(X)
260
+ if self.outputs_2d_:
261
+ return np.stack(
262
+ [
263
+ self.classes_[idx][np.argmax(probas, axis=1)]
264
+ for idx, probas in enumerate(probabilities)
265
+ ],
266
+ axis=1,
267
+ )
268
+ return self.classes_[np.argmax(probabilities, axis=1)]
269
+ # In that case, we do not need the distances to perform
270
+ # the weighting so we do not compute them.
271
+ neigh_ind = self.kneighbors(X, return_distance=False)
272
+ neigh_dist = None
273
+ else:
274
+ neigh_dist, neigh_ind = self.kneighbors(X)
275
+
276
+ classes_ = self.classes_
277
+ _y = self._y
278
+ if not self.outputs_2d_:
279
+ _y = self._y.reshape((-1, 1))
280
+ classes_ = [self.classes_]
281
+
282
+ n_outputs = len(classes_)
283
+ n_queries = _num_samples(X)
284
+ weights = _get_weights(neigh_dist, self.weights)
285
+ if weights is not None and _all_with_any_reduction_axis_1(weights, value=0):
286
+ raise ValueError(
287
+ "All neighbors of some sample is getting zero weights. "
288
+ "Please modify 'weights' to avoid this case if you are "
289
+ "using a user-defined function."
290
+ )
291
+
292
+ y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
293
+ for k, classes_k in enumerate(classes_):
294
+ if weights is None:
295
+ mode, _ = _mode(_y[neigh_ind, k], axis=1)
296
+ else:
297
+ mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
298
+
299
+ mode = np.asarray(mode.ravel(), dtype=np.intp)
300
+ y_pred[:, k] = classes_k.take(mode)
301
+
302
+ if not self.outputs_2d_:
303
+ y_pred = y_pred.ravel()
304
+
305
+ return y_pred
306
+
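The per-row vote computed above reduces, for each query, to a (weighted) mode over the neighbors' labels; a plain-NumPy sketch of the unweighted case (illustrative names only):

    import numpy as np

    neigh_labels = np.array([[0, 0, 1],
                             [1, 1, 0]])
    majority = np.array([np.bincount(row).argmax() for row in neigh_labels])
    print(majority)   # [0 1]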
307
+ def predict_proba(self, X):
308
+ """Return probability estimates for the test data X.
309
+
310
+ Parameters
311
+ ----------
312
+ X : {array-like, sparse matrix} of shape (n_queries, n_features), \
313
+ or (n_queries, n_indexed) if metric == 'precomputed'
314
+ Test samples.
315
+
316
+ Returns
317
+ -------
318
+ p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \
319
+ of such arrays if n_outputs > 1.
320
+ The class probabilities of the input samples. Classes are ordered
321
+ by lexicographic order.
322
+ """
323
+ check_is_fitted(self, "_fit_method")
324
+ if self.weights == "uniform":
325
+ # TODO: systematize this mapping of metric for
326
+ # PairwiseDistancesReductions.
327
+ metric, metric_kwargs = _adjusted_metric(
328
+ metric=self.metric, metric_kwargs=self.metric_params, p=self.p
329
+ )
330
+ if (
331
+ self._fit_method == "brute"
332
+ and ArgKminClassMode.is_usable_for(X, self._fit_X, metric)
333
+ # TODO: Implement efficient multi-output solution
334
+ and not self.outputs_2d_
335
+ ):
336
+ if self.metric == "precomputed":
337
+ X = _check_precomputed(X)
338
+ else:
339
+ X = self._validate_data(
340
+ X, accept_sparse="csr", reset=False, order="C"
341
+ )
342
+
343
+ probabilities = ArgKminClassMode.compute(
344
+ X,
345
+ self._fit_X,
346
+ k=self.n_neighbors,
347
+ weights=self.weights,
348
+ Y_labels=self._y,
349
+ unique_Y_labels=self.classes_,
350
+ metric=metric,
351
+ metric_kwargs=metric_kwargs,
352
+ # `strategy="parallel_on_X"` has in practice been shown
353
+ # to be more efficient than `strategy="parallel_on_Y"`
354
+ # on many combinations of datasets.
355
+ # Hence, we choose to enforce it here.
356
+ # For more information, see:
357
+ # https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342 # noqa
358
+ # TODO: adapt the heuristic for `strategy="auto"` for
359
+ # `ArgKminClassMode` and use `strategy="auto"`.
360
+ strategy="parallel_on_X",
361
+ )
362
+ return probabilities
363
+
364
+ # In that case, we do not need the distances to perform
365
+ # the weighting so we do not compute them.
366
+ neigh_ind = self.kneighbors(X, return_distance=False)
367
+ neigh_dist = None
368
+ else:
369
+ neigh_dist, neigh_ind = self.kneighbors(X)
370
+
371
+ classes_ = self.classes_
372
+ _y = self._y
373
+ if not self.outputs_2d_:
374
+ _y = self._y.reshape((-1, 1))
375
+ classes_ = [self.classes_]
376
+
377
+ n_queries = _num_samples(X)
378
+
379
+ weights = _get_weights(neigh_dist, self.weights)
380
+ if weights is None:
381
+ weights = np.ones_like(neigh_ind)
382
+ elif _all_with_any_reduction_axis_1(weights, value=0):
383
+ raise ValueError(
384
+ "All neighbors of some sample is getting zero weights. "
385
+ "Please modify 'weights' to avoid this case if you are "
386
+ "using a user-defined function."
387
+ )
388
+
389
+ all_rows = np.arange(n_queries)
390
+ probabilities = []
391
+ for k, classes_k in enumerate(classes_):
392
+ pred_labels = _y[:, k][neigh_ind]
393
+ proba_k = np.zeros((n_queries, classes_k.size))
394
+
395
+ # a simple ':' index doesn't work right
396
+ for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
397
+ proba_k[all_rows, idx] += weights[:, i]
398
+
399
+ # normalize 'votes' into real [0,1] probabilities
400
+ normalizer = proba_k.sum(axis=1)[:, np.newaxis]
401
+ proba_k /= normalizer
402
+
403
+ probabilities.append(proba_k)
404
+
405
+ if not self.outputs_2d_:
406
+ probabilities = probabilities[0]
407
+
408
+ return probabilities
409
+
410
+ def _more_tags(self):
411
+ return {"multilabel": True}
412
+
413
+
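A short end-to-end sketch of the classifier defined above, contrasting uniform and distance weighting (illustrative values only, not part of the diffed file):

    from sklearn.neighbors import KNeighborsClassifier

    X = [[0], [1], [2], [3]]
    y = [0, 0, 1, 1]

    uniform = KNeighborsClassifier(n_neighbors=3).fit(X, y)
    weighted = KNeighborsClassifier(n_neighbors=3, weights="distance").fit(X, y)

    # Uniform: each of the 3 neighbors counts equally -> roughly [0.67, 0.33].
    print(uniform.predict_proba([[1.1]]))
    # Distance: the two nearby class-0 points dominate even more strongly.
    print(weighted.predict_proba([[1.1]]))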
414
+ class RadiusNeighborsClassifier(RadiusNeighborsMixin, ClassifierMixin, NeighborsBase):
415
+ """Classifier implementing a vote among neighbors within a given radius.
416
+
417
+ Read more in the :ref:`User Guide <classification>`.
418
+
419
+ Parameters
420
+ ----------
421
+ radius : float, default=1.0
422
+ Range of parameter space to use by default for :meth:`radius_neighbors`
423
+ queries.
424
+
425
+ weights : {'uniform', 'distance'}, callable or None, default='uniform'
426
+ Weight function used in prediction. Possible values:
427
+
428
+ - 'uniform' : uniform weights. All points in each neighborhood
429
+ are weighted equally.
430
+ - 'distance' : weight points by the inverse of their distance.
431
+ in this case, closer neighbors of a query point will have a
432
+ greater influence than neighbors which are further away.
433
+ - [callable] : a user-defined function which accepts an
434
+ array of distances, and returns an array of the same shape
435
+ containing the weights.
436
+
437
+ Uniform weights are used by default.
438
+
439
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
440
+ Algorithm used to compute the nearest neighbors:
441
+
442
+ - 'ball_tree' will use :class:`BallTree`
443
+ - 'kd_tree' will use :class:`KDTree`
444
+ - 'brute' will use a brute-force search.
445
+ - 'auto' will attempt to decide the most appropriate algorithm
446
+ based on the values passed to :meth:`fit` method.
447
+
448
+ Note: fitting on sparse input will override the setting of
449
+ this parameter, using brute force.
450
+
451
+ leaf_size : int, default=30
452
+ Leaf size passed to BallTree or KDTree. This can affect the
453
+ speed of the construction and query, as well as the memory
454
+ required to store the tree. The optimal value depends on the
455
+ nature of the problem.
456
+
457
+ p : float, default=2
458
+ Power parameter for the Minkowski metric. When p = 1, this is
459
+ equivalent to using manhattan_distance (l1), and euclidean_distance
460
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
461
+ This parameter is expected to be positive.
462
+
463
+ metric : str or callable, default='minkowski'
464
+ Metric to use for distance computation. Default is "minkowski", which
465
+ results in the standard Euclidean distance when p = 2. See the
466
+ documentation of `scipy.spatial.distance
467
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
468
+ the metrics listed in
469
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
470
+ values.
471
+
472
+ If metric is "precomputed", X is assumed to be a distance matrix and
473
+ must be square during fit. X may be a :term:`sparse graph`, in which
474
+ case only "nonzero" elements may be considered neighbors.
475
+
476
+ If metric is a callable function, it takes two arrays representing 1D
477
+ vectors as inputs and must return one value indicating the distance
478
+ between those vectors. This works for Scipy's metrics, but is less
479
+ efficient than passing the metric name as a string.
480
+
481
+ outlier_label : {manual label, 'most_frequent'}, default=None
482
+ Label for outlier samples (samples with no neighbors in given radius).
483
+
484
+ - manual label: str or int label (should be the same type as y)
485
+ or list of manual labels if multi-output is used.
486
+ - 'most_frequent' : assign the most frequent label of y to outliers.
487
+ - None : when any outlier is detected, ValueError will be raised.
488
+
489
+ The outlier label should be selected from among the unique 'Y' labels.
490
+ If it is set to a different value, a warning will be raised and
491
+ all class probabilities of outliers will be set to 0.
492
+
493
+ metric_params : dict, default=None
494
+ Additional keyword arguments for the metric function.
495
+
496
+ n_jobs : int, default=None
497
+ The number of parallel jobs to run for neighbors search.
498
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
499
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
500
+ for more details.
501
+
502
+ Attributes
503
+ ----------
504
+ classes_ : ndarray of shape (n_classes,)
505
+ Class labels known to the classifier.
506
+
507
+ effective_metric_ : str or callable
508
+ The distance metric used. It will be same as the `metric` parameter
509
+ or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
510
+ 'minkowski' and `p` parameter set to 2.
511
+
512
+ effective_metric_params_ : dict
513
+ Additional keyword arguments for the metric function. For most metrics
514
+ will be same with `metric_params` parameter, but may also contain the
515
+ `p` parameter value if the `effective_metric_` attribute is set to
516
+ 'minkowski'.
517
+
518
+ n_features_in_ : int
519
+ Number of features seen during :term:`fit`.
520
+
521
+ .. versionadded:: 0.24
522
+
523
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
524
+ Names of features seen during :term:`fit`. Defined only when `X`
525
+ has feature names that are all strings.
526
+
527
+ .. versionadded:: 1.0
528
+
529
+ n_samples_fit_ : int
530
+ Number of samples in the fitted data.
531
+
532
+ outlier_label_ : int or array-like of shape (n_class,)
533
+ Label which is given for outlier samples (samples with no neighbors
534
+ on given radius).
535
+
536
+ outputs_2d_ : bool
537
+ False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
538
+ otherwise True.
539
+
540
+ See Also
541
+ --------
542
+ KNeighborsClassifier : Classifier implementing the k-nearest neighbors
543
+ vote.
544
+ RadiusNeighborsRegressor : Regression based on neighbors within a
545
+ fixed radius.
546
+ KNeighborsRegressor : Regression based on k-nearest neighbors.
547
+ NearestNeighbors : Unsupervised learner for implementing neighbor
548
+ searches.
549
+
550
+ Notes
551
+ -----
552
+ See :ref:`Nearest Neighbors <neighbors>` in the online documentation
553
+ for a discussion of the choice of ``algorithm`` and ``leaf_size``.
554
+
555
+ https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
556
+
557
+ Examples
558
+ --------
559
+ >>> X = [[0], [1], [2], [3]]
560
+ >>> y = [0, 0, 1, 1]
561
+ >>> from sklearn.neighbors import RadiusNeighborsClassifier
562
+ >>> neigh = RadiusNeighborsClassifier(radius=1.0)
563
+ >>> neigh.fit(X, y)
564
+ RadiusNeighborsClassifier(...)
565
+ >>> print(neigh.predict([[1.5]]))
566
+ [0]
567
+ >>> print(neigh.predict_proba([[1.0]]))
568
+ [[0.66666667 0.33333333]]
569
+ """
570
+
571
+ _parameter_constraints: dict = {
572
+ **NeighborsBase._parameter_constraints,
573
+ "weights": [StrOptions({"uniform", "distance"}), callable, None],
574
+ "outlier_label": [Integral, str, "array-like", None],
575
+ }
576
+ _parameter_constraints.pop("n_neighbors")
577
+
578
+ def __init__(
579
+ self,
580
+ radius=1.0,
581
+ *,
582
+ weights="uniform",
583
+ algorithm="auto",
584
+ leaf_size=30,
585
+ p=2,
586
+ metric="minkowski",
587
+ outlier_label=None,
588
+ metric_params=None,
589
+ n_jobs=None,
590
+ ):
591
+ super().__init__(
592
+ radius=radius,
593
+ algorithm=algorithm,
594
+ leaf_size=leaf_size,
595
+ metric=metric,
596
+ p=p,
597
+ metric_params=metric_params,
598
+ n_jobs=n_jobs,
599
+ )
600
+ self.weights = weights
601
+ self.outlier_label = outlier_label
602
+
603
+ @_fit_context(
604
+ # RadiusNeighborsClassifier.metric is not validated yet
605
+ prefer_skip_nested_validation=False
606
+ )
607
+ def fit(self, X, y):
608
+ """Fit the radius neighbors classifier from the training dataset.
609
+
610
+ Parameters
611
+ ----------
612
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
613
+ (n_samples, n_samples) if metric='precomputed'
614
+ Training data.
615
+
616
+ y : {array-like, sparse matrix} of shape (n_samples,) or \
617
+ (n_samples, n_outputs)
618
+ Target values.
619
+
620
+ Returns
621
+ -------
622
+ self : RadiusNeighborsClassifier
623
+ The fitted radius neighbors classifier.
624
+ """
625
+ self._fit(X, y)
626
+
627
+ classes_ = self.classes_
628
+ _y = self._y
629
+ if not self.outputs_2d_:
630
+ _y = self._y.reshape((-1, 1))
631
+ classes_ = [self.classes_]
632
+
633
+ if self.outlier_label is None:
634
+ outlier_label_ = None
635
+
636
+ elif self.outlier_label == "most_frequent":
637
+ outlier_label_ = []
638
+ # iterate over multi-output, get the most frequent label for each
639
+ # output.
640
+ for k, classes_k in enumerate(classes_):
641
+ label_count = np.bincount(_y[:, k])
642
+ outlier_label_.append(classes_k[label_count.argmax()])
643
+
644
+ else:
645
+ if _is_arraylike(self.outlier_label) and not isinstance(
646
+ self.outlier_label, str
647
+ ):
648
+ if len(self.outlier_label) != len(classes_):
649
+ raise ValueError(
650
+ "The length of outlier_label: {} is "
651
+ "inconsistent with the output "
652
+ "length: {}".format(self.outlier_label, len(classes_))
653
+ )
654
+ outlier_label_ = self.outlier_label
655
+ else:
656
+ outlier_label_ = [self.outlier_label] * len(classes_)
657
+
658
+ for classes, label in zip(classes_, outlier_label_):
659
+ if _is_arraylike(label) and not isinstance(label, str):
660
+ # ensure the outlier label for each output is a scalar.
661
+ raise TypeError(
662
+ "The outlier_label of classes {} is "
663
+ "supposed to be a scalar, got "
664
+ "{}.".format(classes, label)
665
+ )
666
+ if np.append(classes, label).dtype != classes.dtype:
667
+ # ensure the dtype of outlier label is consistent with y.
668
+ raise TypeError(
669
+ "The dtype of outlier_label {} is "
670
+ "inconsistent with classes {} in "
671
+ "y.".format(label, classes)
672
+ )
673
+
674
+ self.outlier_label_ = outlier_label_
675
+
676
+ return self
677
+
678
+ def predict(self, X):
679
+ """Predict the class labels for the provided data.
680
+
681
+ Parameters
682
+ ----------
683
+ X : {array-like, sparse matrix} of shape (n_queries, n_features), \
684
+ or (n_queries, n_indexed) if metric == 'precomputed'
685
+ Test samples.
686
+
687
+ Returns
688
+ -------
689
+ y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
690
+ Class labels for each data sample.
691
+ """
692
+
693
+ probs = self.predict_proba(X)
694
+ classes_ = self.classes_
695
+
696
+ if not self.outputs_2d_:
697
+ probs = [probs]
698
+ classes_ = [self.classes_]
699
+
700
+ n_outputs = len(classes_)
701
+ n_queries = probs[0].shape[0]
702
+ y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
703
+
704
+ for k, prob in enumerate(probs):
705
+ # iterate over multi-output, assign labels based on probabilities
706
+ # of each output.
707
+ max_prob_index = prob.argmax(axis=1)
708
+ y_pred[:, k] = classes_[k].take(max_prob_index)
709
+
710
+ outlier_zero_probs = (prob == 0).all(axis=1)
711
+ if outlier_zero_probs.any():
712
+ zero_prob_index = np.flatnonzero(outlier_zero_probs)
713
+ y_pred[zero_prob_index, k] = self.outlier_label_[k]
714
+
715
+ if not self.outputs_2d_:
716
+ y_pred = y_pred.ravel()
717
+
718
+ return y_pred
719
+
720
+ def predict_proba(self, X):
721
+ """Return probability estimates for the test data X.
722
+
723
+ Parameters
724
+ ----------
725
+ X : {array-like, sparse matrix} of shape (n_queries, n_features), \
726
+ or (n_queries, n_indexed) if metric == 'precomputed'
727
+ Test samples.
728
+
729
+ Returns
730
+ -------
731
+ p : ndarray of shape (n_queries, n_classes), or a list of \
732
+ n_outputs of such arrays if n_outputs > 1.
733
+ The class probabilities of the input samples. Classes are ordered
734
+ by lexicographic order.
735
+ """
736
+ check_is_fitted(self, "_fit_method")
737
+ n_queries = _num_samples(X)
738
+
739
+ metric, metric_kwargs = _adjusted_metric(
740
+ metric=self.metric, metric_kwargs=self.metric_params, p=self.p
741
+ )
742
+
743
+ if (
744
+ self.weights == "uniform"
745
+ and self._fit_method == "brute"
746
+ and not self.outputs_2d_
747
+ and RadiusNeighborsClassMode.is_usable_for(X, self._fit_X, metric)
748
+ ):
749
+ probabilities = RadiusNeighborsClassMode.compute(
750
+ X=X,
751
+ Y=self._fit_X,
752
+ radius=self.radius,
753
+ weights=self.weights,
754
+ Y_labels=self._y,
755
+ unique_Y_labels=self.classes_,
756
+ outlier_label=self.outlier_label,
757
+ metric=metric,
758
+ metric_kwargs=metric_kwargs,
759
+ strategy="parallel_on_X",
760
+ # `strategy="parallel_on_X"` has in practice been shown
761
+ # to be more efficient than `strategy="parallel_on_Y"`
762
+ # on many combinations of datasets.
763
+ # Hence, we choose to enforce it here.
764
+ # For more information, see:
765
+ # https://github.com/scikit-learn/scikit-learn/pull/26828/files#r1282398471 # noqa
766
+ )
767
+ return probabilities
768
+
769
+ neigh_dist, neigh_ind = self.radius_neighbors(X)
770
+ outlier_mask = np.zeros(n_queries, dtype=bool)
771
+ outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind]
772
+ outliers = np.flatnonzero(outlier_mask)
773
+ inliers = np.flatnonzero(~outlier_mask)
774
+
775
+ classes_ = self.classes_
776
+ _y = self._y
777
+ if not self.outputs_2d_:
778
+ _y = self._y.reshape((-1, 1))
779
+ classes_ = [self.classes_]
780
+
781
+ if self.outlier_label_ is None and outliers.size > 0:
782
+ raise ValueError(
783
+ "No neighbors found for test samples %r, "
784
+ "you can try using larger radius, "
785
+ "giving a label for outliers, "
786
+ "or considering removing them from your dataset." % outliers
787
+ )
788
+
789
+ weights = _get_weights(neigh_dist, self.weights)
790
+ if weights is not None:
791
+ weights = weights[inliers]
792
+
793
+ probabilities = []
794
+ # iterate over multi-output, measure probabilities of the k-th output.
795
+ for k, classes_k in enumerate(classes_):
796
+ pred_labels = np.zeros(len(neigh_ind), dtype=object)
797
+ pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
798
+
799
+ proba_k = np.zeros((n_queries, classes_k.size))
800
+ proba_inl = np.zeros((len(inliers), classes_k.size))
801
+
802
+ # samples have different size of neighbors within the same radius
803
+ if weights is None:
804
+ for i, idx in enumerate(pred_labels[inliers]):
805
+ proba_inl[i, :] = np.bincount(idx, minlength=classes_k.size)
806
+ else:
807
+ for i, idx in enumerate(pred_labels[inliers]):
808
+ proba_inl[i, :] = np.bincount(
809
+ idx, weights[i], minlength=classes_k.size
810
+ )
811
+ proba_k[inliers, :] = proba_inl
812
+
813
+ if outliers.size > 0:
814
+ _outlier_label = self.outlier_label_[k]
815
+ label_index = np.flatnonzero(classes_k == _outlier_label)
816
+ if label_index.size == 1:
817
+ proba_k[outliers, label_index[0]] = 1.0
818
+ else:
819
+ warnings.warn(
820
+ "Outlier label {} is not in training "
821
+ "classes. All class probabilities of "
822
+ "outliers will be assigned with 0."
823
+ "".format(self.outlier_label_[k])
824
+ )
825
+
826
+ # normalize 'votes' into real [0,1] probabilities
827
+ normalizer = proba_k.sum(axis=1)[:, np.newaxis]
828
+ normalizer[normalizer == 0.0] = 1.0
829
+ proba_k /= normalizer
830
+
831
+ probabilities.append(proba_k)
832
+
833
+ if not self.outputs_2d_:
834
+ probabilities = probabilities[0]
835
+
836
+ return probabilities
837
+
838
+ def _more_tags(self):
839
+ return {"multilabel": True}
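A brief sketch of how `outlier_label` changes the behaviour above when a query has no neighbors within the radius (illustrative values only):

    from sklearn.neighbors import RadiusNeighborsClassifier

    X = [[0], [1], [2], [3]]
    y = [0, 0, 1, 1]

    # With outlier_label=None an isolated query would raise a ValueError;
    # 'most_frequent' falls back to the majority class of y instead.
    clf = RadiusNeighborsClassifier(radius=0.5, outlier_label="most_frequent")
    clf.fit(X, y)
    print(clf.predict([[10.0]]))   # [0]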
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_graph.py ADDED
@@ -0,0 +1,719 @@
1
+ """Nearest Neighbors graph functions"""
2
+
3
+ # Author: Jake Vanderplas <[email protected]>
4
+ # Tom Dupre la Tour
5
+ #
6
+ # License: BSD 3 clause (C) INRIA, University of Amsterdam
7
+ import itertools
8
+
9
+ from ..base import ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context
10
+ from ..utils._param_validation import (
11
+ Integral,
12
+ Interval,
13
+ Real,
14
+ StrOptions,
15
+ validate_params,
16
+ )
17
+ from ..utils.validation import check_is_fitted
18
+ from ._base import VALID_METRICS, KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin
19
+ from ._unsupervised import NearestNeighbors
20
+
21
+
22
+ def _check_params(X, metric, p, metric_params):
23
+ """Check the validity of the input parameters"""
24
+ params = zip(["metric", "p", "metric_params"], [metric, p, metric_params])
25
+ est_params = X.get_params()
26
+ for param_name, func_param in params:
27
+ if func_param != est_params[param_name]:
28
+ raise ValueError(
29
+ "Got %s for %s, while the estimator has %s for the same parameter."
30
+ % (func_param, param_name, est_params[param_name])
31
+ )
32
+
33
+
34
+ def _query_include_self(X, include_self, mode):
35
+ """Return the query based on include_self param"""
36
+ if include_self == "auto":
37
+ include_self = mode == "connectivity"
38
+
39
+ # it does not include each sample as its own neighbors
40
+ if not include_self:
41
+ X = None
42
+
43
+ return X
44
+
45
+
46
+ @validate_params(
47
+ {
48
+ "X": ["array-like", KNeighborsMixin],
49
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
50
+ "mode": [StrOptions({"connectivity", "distance"})],
51
+ "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
52
+ "p": [Interval(Real, 0, None, closed="right"), None],
53
+ "metric_params": [dict, None],
54
+ "include_self": ["boolean", StrOptions({"auto"})],
55
+ "n_jobs": [Integral, None],
56
+ },
57
+ prefer_skip_nested_validation=False, # metric is not validated yet
58
+ )
59
+ def kneighbors_graph(
60
+ X,
61
+ n_neighbors,
62
+ *,
63
+ mode="connectivity",
64
+ metric="minkowski",
65
+ p=2,
66
+ metric_params=None,
67
+ include_self=False,
68
+ n_jobs=None,
69
+ ):
70
+ """Compute the (weighted) graph of k-Neighbors for points in X.
71
+
72
+ Read more in the :ref:`User Guide <unsupervised_neighbors>`.
73
+
74
+ Parameters
75
+ ----------
76
+ X : array-like of shape (n_samples, n_features)
77
+ Sample data.
78
+
79
+ n_neighbors : int
80
+ Number of neighbors for each sample.
81
+
82
+ mode : {'connectivity', 'distance'}, default='connectivity'
83
+ Type of returned matrix: 'connectivity' will return the connectivity
84
+ matrix with ones and zeros, and 'distance' will return the distances
85
+ between neighbors according to the given metric.
86
+
87
+ metric : str, default='minkowski'
88
+ Metric to use for distance computation. Default is "minkowski", which
89
+ results in the standard Euclidean distance when p = 2. See the
90
+ documentation of `scipy.spatial.distance
91
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
92
+ the metrics listed in
93
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
94
+ values.
95
+
96
+ p : float, default=2
97
+ Power parameter for the Minkowski metric. When p = 1, this is equivalent
98
+ to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
99
+ For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
100
+ to be positive.
101
+
102
+ metric_params : dict, default=None
103
+ Additional keyword arguments for the metric function.
104
+
105
+ include_self : bool or 'auto', default=False
106
+ Whether or not to mark each sample as the first nearest neighbor to
107
+ itself. If 'auto', then True is used for mode='connectivity' and False
108
+ for mode='distance'.
109
+
110
+ n_jobs : int, default=None
111
+ The number of parallel jobs to run for neighbors search.
112
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
113
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
114
+ for more details.
115
+
116
+ Returns
117
+ -------
118
+ A : sparse matrix of shape (n_samples, n_samples)
119
+ Graph where A[i, j] is assigned the weight of edge that
120
+ connects i to j. The matrix is of CSR format.
121
+
122
+ See Also
123
+ --------
124
+ radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X.
125
+
126
+ Examples
127
+ --------
128
+ >>> X = [[0], [3], [1]]
129
+ >>> from sklearn.neighbors import kneighbors_graph
130
+ >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
131
+ >>> A.toarray()
132
+ array([[1., 0., 1.],
133
+ [0., 1., 1.],
134
+ [1., 0., 1.]])
135
+ """
136
+ if not isinstance(X, KNeighborsMixin):
137
+ X = NearestNeighbors(
138
+ n_neighbors=n_neighbors,
139
+ metric=metric,
140
+ p=p,
141
+ metric_params=metric_params,
142
+ n_jobs=n_jobs,
143
+ ).fit(X)
144
+ else:
145
+ _check_params(X, metric, p, metric_params)
146
+
147
+ query = _query_include_self(X._fit_X, include_self, mode)
148
+ return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
149
+
150
+
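The function above accepts either raw data or an already fitted estimator implementing `KNeighborsMixin`; a small sketch of the second form, which reuses a prefitted index (illustrative only):

    from sklearn.neighbors import NearestNeighbors, kneighbors_graph

    X = [[0], [3], [1]]

    nn = NearestNeighbors(n_neighbors=2).fit(X)
    # Passing the fitted estimator avoids rebuilding the index inside the call.
    A = kneighbors_graph(nn, n_neighbors=2, mode="distance")
    print(A.toarray())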
151
+ @validate_params(
152
+ {
153
+ "X": ["array-like", RadiusNeighborsMixin],
154
+ "radius": [Interval(Real, 0, None, closed="both")],
155
+ "mode": [StrOptions({"connectivity", "distance"})],
156
+ "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
157
+ "p": [Interval(Real, 0, None, closed="right"), None],
158
+ "metric_params": [dict, None],
159
+ "include_self": ["boolean", StrOptions({"auto"})],
160
+ "n_jobs": [Integral, None],
161
+ },
162
+ prefer_skip_nested_validation=False, # metric is not validated yet
163
+ )
164
+ def radius_neighbors_graph(
165
+ X,
166
+ radius,
167
+ *,
168
+ mode="connectivity",
169
+ metric="minkowski",
170
+ p=2,
171
+ metric_params=None,
172
+ include_self=False,
173
+ n_jobs=None,
174
+ ):
175
+ """Compute the (weighted) graph of Neighbors for points in X.
176
+
177
+ Neighborhoods are restricted to the points at a distance lower than
178
+ radius.
179
+
180
+ Read more in the :ref:`User Guide <unsupervised_neighbors>`.
181
+
182
+ Parameters
183
+ ----------
184
+ X : array-like of shape (n_samples, n_features)
185
+ Sample data.
186
+
187
+ radius : float
188
+ Radius of neighborhoods.
189
+
190
+ mode : {'connectivity', 'distance'}, default='connectivity'
191
+ Type of returned matrix: 'connectivity' will return the connectivity
192
+ matrix with ones and zeros, and 'distance' will return the distances
193
+ between neighbors according to the given metric.
194
+
195
+ metric : str, default='minkowski'
196
+ Metric to use for distance computation. Default is "minkowski", which
197
+ results in the standard Euclidean distance when p = 2. See the
198
+ documentation of `scipy.spatial.distance
199
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
200
+ the metrics listed in
201
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
202
+ values.
203
+
204
+ p : float, default=2
205
+ Power parameter for the Minkowski metric. When p = 1, this is
206
+ equivalent to using manhattan_distance (l1), and euclidean_distance
207
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
208
+
209
+ metric_params : dict, default=None
210
+ Additional keyword arguments for the metric function.
211
+
212
+ include_self : bool or 'auto', default=False
213
+ Whether or not to mark each sample as the first nearest neighbor to
214
+ itself. If 'auto', then True is used for mode='connectivity' and False
215
+ for mode='distance'.
216
+
217
+ n_jobs : int, default=None
218
+ The number of parallel jobs to run for neighbors search.
219
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
220
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
221
+ for more details.
222
+
223
+ Returns
224
+ -------
225
+ A : sparse matrix of shape (n_samples, n_samples)
226
+ Graph where A[i, j] is assigned the weight of edge that connects
227
+ i to j. The matrix is of CSR format.
228
+
229
+ See Also
230
+ --------
231
+ kneighbors_graph: Compute the weighted graph of k-neighbors for points in X.
232
+
233
+ Examples
234
+ --------
235
+ >>> X = [[0], [3], [1]]
236
+ >>> from sklearn.neighbors import radius_neighbors_graph
237
+ >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
238
+ ... include_self=True)
239
+ >>> A.toarray()
240
+ array([[1., 0., 1.],
241
+ [0., 1., 0.],
242
+ [1., 0., 1.]])
243
+ """
244
+ if not isinstance(X, RadiusNeighborsMixin):
245
+ X = NearestNeighbors(
246
+ radius=radius,
247
+ metric=metric,
248
+ p=p,
249
+ metric_params=metric_params,
250
+ n_jobs=n_jobs,
251
+ ).fit(X)
252
+ else:
253
+ _check_params(X, metric, p, metric_params)
254
+
255
+ query = _query_include_self(X._fit_X, include_self, mode)
256
+ return X.radius_neighbors_graph(query, radius, mode)
257
+
258
+
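A hedged sketch of the row-wise behaviour described above: unlike the k-neighbors graph, each row of the radius graph may hold a different number of stored entries. Only the public API shown above is assumed; this is not part of the uploaded file.

    import numpy as np
    from sklearn.neighbors import radius_neighbors_graph

    X = np.array([[0.0], [3.0], [1.0]])

    # Only neighbors within the radius are kept, so row sizes can differ.
    A = radius_neighbors_graph(X, radius=1.5, mode="distance", include_self=False)
    print(A.toarray())        # zeros mean "no edge within the radius"
    print(np.diff(A.indptr))  # number of stored neighbors per sample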
259
+ class KNeighborsTransformer(
260
+ ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase
261
+ ):
262
+ """Transform X into a (weighted) graph of k nearest neighbors.
263
+
264
+ The transformed data is a sparse graph as returned by kneighbors_graph.
265
+
266
+ Read more in the :ref:`User Guide <neighbors_transformer>`.
267
+
268
+ .. versionadded:: 0.22
269
+
270
+ Parameters
271
+ ----------
272
+ mode : {'distance', 'connectivity'}, default='distance'
273
+ Type of returned matrix: 'connectivity' will return the connectivity
274
+ matrix with ones and zeros, and 'distance' will return the distances
275
+ between neighbors according to the given metric.
276
+
277
+ n_neighbors : int, default=5
278
+ Number of neighbors for each sample in the transformed sparse graph.
279
+ For compatibility reasons, as each sample is considered as its own
280
+ neighbor, one extra neighbor will be computed when mode == 'distance'.
281
+ In this case, the sparse graph contains (n_neighbors + 1) neighbors.
282
+
283
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
284
+ Algorithm used to compute the nearest neighbors:
285
+
286
+ - 'ball_tree' will use :class:`BallTree`
287
+ - 'kd_tree' will use :class:`KDTree`
288
+ - 'brute' will use a brute-force search.
289
+ - 'auto' will attempt to decide the most appropriate algorithm
290
+ based on the values passed to :meth:`fit` method.
291
+
292
+ Note: fitting on sparse input will override the setting of
293
+ this parameter, using brute force.
294
+
295
+ leaf_size : int, default=30
296
+ Leaf size passed to BallTree or KDTree. This can affect the
297
+ speed of the construction and query, as well as the memory
298
+ required to store the tree. The optimal value depends on the
299
+ nature of the problem.
300
+
301
+ metric : str or callable, default='minkowski'
302
+ Metric to use for distance computation. Default is "minkowski", which
303
+ results in the standard Euclidean distance when p = 2. See the
304
+ documentation of `scipy.spatial.distance
305
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
306
+ the metrics listed in
307
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
308
+ values.
309
+
310
+ If metric is a callable function, it takes two arrays representing 1D
311
+ vectors as inputs and must return one value indicating the distance
312
+ between those vectors. This works for Scipy's metrics, but is less
313
+ efficient than passing the metric name as a string.
314
+
315
+ Distance matrices are not supported.
316
+
317
+ p : float, default=2
318
+ Parameter for the Minkowski metric from
319
+ sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
320
+ equivalent to using manhattan_distance (l1), and euclidean_distance
321
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
322
+ This parameter is expected to be positive.
323
+
324
+ metric_params : dict, default=None
325
+ Additional keyword arguments for the metric function.
326
+
327
+ n_jobs : int, default=None
328
+ The number of parallel jobs to run for neighbors search.
329
+ If ``-1``, then the number of jobs is set to the number of CPU cores.
330
+
331
+ Attributes
332
+ ----------
333
+ effective_metric_ : str or callable
334
+ The distance metric used. It will be the same as the `metric` parameter
335
+ or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
336
+ 'minkowski' and `p` parameter set to 2.
337
+
338
+ effective_metric_params_ : dict
339
+ Additional keyword arguments for the metric function. For most metrics
340
+ this will be the same as the `metric_params` parameter, but may also contain the
341
+ `p` parameter value if the `effective_metric_` attribute is set to
342
+ 'minkowski'.
343
+
344
+ n_features_in_ : int
345
+ Number of features seen during :term:`fit`.
346
+
347
+ .. versionadded:: 0.24
348
+
349
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
350
+ Names of features seen during :term:`fit`. Defined only when `X`
351
+ has feature names that are all strings.
352
+
353
+ .. versionadded:: 1.0
354
+
355
+ n_samples_fit_ : int
356
+ Number of samples in the fitted data.
357
+
358
+ See Also
359
+ --------
360
+ kneighbors_graph : Compute the weighted graph of k-neighbors for
361
+ points in X.
362
+ RadiusNeighborsTransformer : Transform X into a weighted graph of
363
+ neighbors nearer than a radius.
364
+
365
+ Notes
366
+ -----
367
+ For an example of using :class:`~sklearn.neighbors.KNeighborsTransformer`
368
+ in combination with :class:`~sklearn.manifold.TSNE` see
369
+ :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`.
370
+
371
+ Examples
372
+ --------
373
+ >>> from sklearn.datasets import load_wine
374
+ >>> from sklearn.neighbors import KNeighborsTransformer
375
+ >>> X, _ = load_wine(return_X_y=True)
376
+ >>> X.shape
377
+ (178, 13)
378
+ >>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance')
379
+ >>> X_dist_graph = transformer.fit_transform(X)
380
+ >>> X_dist_graph.shape
381
+ (178, 178)
382
+ """
383
+
384
+ _parameter_constraints: dict = {
385
+ **NeighborsBase._parameter_constraints,
386
+ "mode": [StrOptions({"distance", "connectivity"})],
387
+ }
388
+ _parameter_constraints.pop("radius")
389
+
390
+ def __init__(
391
+ self,
392
+ *,
393
+ mode="distance",
394
+ n_neighbors=5,
395
+ algorithm="auto",
396
+ leaf_size=30,
397
+ metric="minkowski",
398
+ p=2,
399
+ metric_params=None,
400
+ n_jobs=None,
401
+ ):
402
+ super(KNeighborsTransformer, self).__init__(
403
+ n_neighbors=n_neighbors,
404
+ radius=None,
405
+ algorithm=algorithm,
406
+ leaf_size=leaf_size,
407
+ metric=metric,
408
+ p=p,
409
+ metric_params=metric_params,
410
+ n_jobs=n_jobs,
411
+ )
412
+ self.mode = mode
413
+
414
+ @_fit_context(
415
+ # KNeighborsTransformer.metric is not validated yet
416
+ prefer_skip_nested_validation=False
417
+ )
418
+ def fit(self, X, y=None):
419
+ """Fit the k-nearest neighbors transformer from the training dataset.
420
+
421
+ Parameters
422
+ ----------
423
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
424
+ (n_samples, n_samples) if metric='precomputed'
425
+ Training data.
426
+ y : Ignored
427
+ Not used, present for API consistency by convention.
428
+
429
+ Returns
430
+ -------
431
+ self : KNeighborsTransformer
432
+ The fitted k-nearest neighbors transformer.
433
+ """
434
+ self._fit(X)
435
+ self._n_features_out = self.n_samples_fit_
436
+ return self
437
+
438
+ def transform(self, X):
439
+ """Compute the (weighted) graph of Neighbors for points in X.
440
+
441
+ Parameters
442
+ ----------
443
+ X : array-like of shape (n_samples_transform, n_features)
444
+ Sample data.
445
+
446
+ Returns
447
+ -------
448
+ Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
449
+ Xt[i, j] is assigned the weight of edge that connects i to j.
450
+ Only the neighbors have an explicit value.
451
+ The diagonal is always explicit.
452
+ The matrix is of CSR format.
453
+ """
454
+ check_is_fitted(self)
455
+ add_one = self.mode == "distance"
456
+ return self.kneighbors_graph(
457
+ X, mode=self.mode, n_neighbors=self.n_neighbors + add_one
458
+ )
459
+
460
+ def fit_transform(self, X, y=None):
461
+ """Fit to data, then transform it.
462
+
463
+ Fits transformer to X and y with optional parameters fit_params
464
+ and returns a transformed version of X.
465
+
466
+ Parameters
467
+ ----------
468
+ X : array-like of shape (n_samples, n_features)
469
+ Training set.
470
+
471
+ y : Ignored
472
+ Not used, present for API consistency by convention.
473
+
474
+ Returns
475
+ -------
476
+ Xt : sparse matrix of shape (n_samples, n_samples)
477
+ Xt[i, j] is assigned the weight of edge that connects i to j.
478
+ Only the neighbors have an explicit value.
479
+ The diagonal is always explicit.
480
+ The matrix is of CSR format.
481
+ """
482
+ return self.fit(X).transform(X)
483
+
484
+ def _more_tags(self):
485
+ return {
486
+ "_xfail_checks": {
487
+ "check_methods_sample_order_invariance": "check is not applicable."
488
+ }
489
+ }
490
+
491
+
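As a hedged illustration of the pattern the class above is designed for (precomputing the sparse neighbors graph once so a downstream estimator with `metric='precomputed'` can reuse it), a minimal sketch that assumes only the public scikit-learn estimators it names and is not part of the uploaded file:

    from sklearn.datasets import load_wine
    from sklearn.neighbors import KNeighborsClassifier, KNeighborsTransformer
    from sklearn.pipeline import make_pipeline

    X, y = load_wine(return_X_y=True)

    # In 'distance' mode the transformer stores n_neighbors + 1 entries per row
    # (self included), so it must cover at least the n_neighbors of the final step.
    model = make_pipeline(
        KNeighborsTransformer(n_neighbors=10, mode="distance"),
        KNeighborsClassifier(n_neighbors=5, metric="precomputed"),
    )
    print(model.fit(X, y).score(X, y))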
492
+ class RadiusNeighborsTransformer(
493
+ ClassNamePrefixFeaturesOutMixin,
494
+ RadiusNeighborsMixin,
495
+ TransformerMixin,
496
+ NeighborsBase,
497
+ ):
498
+ """Transform X into a (weighted) graph of neighbors nearer than a radius.
499
+
500
+ The transformed data is a sparse graph as returned by
501
+ `radius_neighbors_graph`.
502
+
503
+ Read more in the :ref:`User Guide <neighbors_transformer>`.
504
+
505
+ .. versionadded:: 0.22
506
+
507
+ Parameters
508
+ ----------
509
+ mode : {'distance', 'connectivity'}, default='distance'
510
+ Type of returned matrix: 'connectivity' will return the connectivity
511
+ matrix with ones and zeros, and 'distance' will return the distances
512
+ between neighbors according to the given metric.
513
+
514
+ radius : float, default=1.0
515
+ Radius of neighborhood in the transformed sparse graph.
516
+
517
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
518
+ Algorithm used to compute the nearest neighbors:
519
+
520
+ - 'ball_tree' will use :class:`BallTree`
521
+ - 'kd_tree' will use :class:`KDTree`
522
+ - 'brute' will use a brute-force search.
523
+ - 'auto' will attempt to decide the most appropriate algorithm
524
+ based on the values passed to :meth:`fit` method.
525
+
526
+ Note: fitting on sparse input will override the setting of
527
+ this parameter, using brute force.
528
+
529
+ leaf_size : int, default=30
530
+ Leaf size passed to BallTree or KDTree. This can affect the
531
+ speed of the construction and query, as well as the memory
532
+ required to store the tree. The optimal value depends on the
533
+ nature of the problem.
534
+
535
+ metric : str or callable, default='minkowski'
536
+ Metric to use for distance computation. Default is "minkowski", which
537
+ results in the standard Euclidean distance when p = 2. See the
538
+ documentation of `scipy.spatial.distance
539
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
540
+ the metrics listed in
541
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
542
+ values.
543
+
544
+ If metric is a callable function, it takes two arrays representing 1D
545
+ vectors as inputs and must return one value indicating the distance
546
+ between those vectors. This works for Scipy's metrics, but is less
547
+ efficient than passing the metric name as a string.
548
+
549
+ Distance matrices are not supported.
550
+
551
+ p : float, default=2
552
+ Parameter for the Minkowski metric from
553
+ sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
554
+ equivalent to using manhattan_distance (l1), and euclidean_distance
555
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
556
+ This parameter is expected to be positive.
557
+
558
+ metric_params : dict, default=None
559
+ Additional keyword arguments for the metric function.
560
+
561
+ n_jobs : int, default=None
562
+ The number of parallel jobs to run for neighbors search.
563
+ If ``-1``, then the number of jobs is set to the number of CPU cores.
564
+
565
+ Attributes
566
+ ----------
567
+ effective_metric_ : str or callable
568
+ The distance metric used. It will be the same as the `metric` parameter
569
+ or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
570
+ 'minkowski' and `p` parameter set to 2.
571
+
572
+ effective_metric_params_ : dict
573
+ Additional keyword arguments for the metric function. For most metrics
574
+ this will be the same as the `metric_params` parameter, but may also contain the
575
+ `p` parameter value if the `effective_metric_` attribute is set to
576
+ 'minkowski'.
577
+
578
+ n_features_in_ : int
579
+ Number of features seen during :term:`fit`.
580
+
581
+ .. versionadded:: 0.24
582
+
583
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
584
+ Names of features seen during :term:`fit`. Defined only when `X`
585
+ has feature names that are all strings.
586
+
587
+ .. versionadded:: 1.0
588
+
589
+ n_samples_fit_ : int
590
+ Number of samples in the fitted data.
591
+
592
+ See Also
593
+ --------
594
+ kneighbors_graph : Compute the weighted graph of k-neighbors for
595
+ points in X.
596
+ KNeighborsTransformer : Transform X into a weighted graph of k
597
+ nearest neighbors.
598
+
599
+ Examples
600
+ --------
601
+ >>> import numpy as np
602
+ >>> from sklearn.datasets import load_wine
603
+ >>> from sklearn.cluster import DBSCAN
604
+ >>> from sklearn.neighbors import RadiusNeighborsTransformer
605
+ >>> from sklearn.pipeline import make_pipeline
606
+ >>> X, _ = load_wine(return_X_y=True)
607
+ >>> estimator = make_pipeline(
608
+ ... RadiusNeighborsTransformer(radius=42.0, mode='distance'),
609
+ ... DBSCAN(eps=25.0, metric='precomputed'))
610
+ >>> X_clustered = estimator.fit_predict(X)
611
+ >>> clusters, counts = np.unique(X_clustered, return_counts=True)
612
+ >>> print(counts)
613
+ [ 29 15 111 11 12]
614
+ """
615
+
616
+ _parameter_constraints: dict = {
617
+ **NeighborsBase._parameter_constraints,
618
+ "mode": [StrOptions({"distance", "connectivity"})],
619
+ }
620
+ _parameter_constraints.pop("n_neighbors")
621
+
622
+ def __init__(
623
+ self,
624
+ *,
625
+ mode="distance",
626
+ radius=1.0,
627
+ algorithm="auto",
628
+ leaf_size=30,
629
+ metric="minkowski",
630
+ p=2,
631
+ metric_params=None,
632
+ n_jobs=None,
633
+ ):
634
+ super(RadiusNeighborsTransformer, self).__init__(
635
+ n_neighbors=None,
636
+ radius=radius,
637
+ algorithm=algorithm,
638
+ leaf_size=leaf_size,
639
+ metric=metric,
640
+ p=p,
641
+ metric_params=metric_params,
642
+ n_jobs=n_jobs,
643
+ )
644
+ self.mode = mode
645
+
646
+ @_fit_context(
647
+ # RadiusNeighborsTransformer.metric is not validated yet
648
+ prefer_skip_nested_validation=False
649
+ )
650
+ def fit(self, X, y=None):
651
+ """Fit the radius neighbors transformer from the training dataset.
652
+
653
+ Parameters
654
+ ----------
655
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
656
+ (n_samples, n_samples) if metric='precomputed'
657
+ Training data.
658
+
659
+ y : Ignored
660
+ Not used, present for API consistency by convention.
661
+
662
+ Returns
663
+ -------
664
+ self : RadiusNeighborsTransformer
665
+ The fitted radius neighbors transformer.
666
+ """
667
+ self._fit(X)
668
+ self._n_features_out = self.n_samples_fit_
669
+ return self
670
+
671
+ def transform(self, X):
672
+ """Compute the (weighted) graph of Neighbors for points in X.
673
+
674
+ Parameters
675
+ ----------
676
+ X : array-like of shape (n_samples_transform, n_features)
677
+ Sample data.
678
+
679
+ Returns
680
+ -------
681
+ Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
682
+ Xt[i, j] is assigned the weight of edge that connects i to j.
683
+ Only the neighbors have an explicit value.
684
+ The diagonal is always explicit.
685
+ The matrix is of CSR format.
686
+ """
687
+ check_is_fitted(self)
688
+ return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True)
689
+
690
+ def fit_transform(self, X, y=None):
691
+ """Fit to data, then transform it.
692
+
693
+ Fits transformer to X and y with optional parameters fit_params
694
+ and returns a transformed version of X.
695
+
696
+ Parameters
697
+ ----------
698
+ X : array-like of shape (n_samples, n_features)
699
+ Training set.
700
+
701
+ y : Ignored
702
+ Not used, present for API consistency by convention.
703
+
704
+ Returns
705
+ -------
706
+ Xt : sparse matrix of shape (n_samples, n_samples)
707
+ Xt[i, j] is assigned the weight of edge that connects i to j.
708
+ Only the neighbors have an explicit value.
709
+ The diagonal is always explicit.
710
+ The matrix is of CSR format.
711
+ """
712
+ return self.fit(X).transform(X)
713
+
714
+ def _more_tags(self):
715
+ return {
716
+ "_xfail_checks": {
717
+ "check_methods_sample_order_invariance": "check is not applicable."
718
+ }
719
+ }
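Building on the DBSCAN example in the docstring above, a hedged sketch of why the transformer form is useful: the radius graph is computed once and reused while sweeping DBSCAN's `eps`. It assumes only the public estimators already referenced and is not part of the uploaded file.

    import numpy as np
    from sklearn.cluster import DBSCAN
    from sklearn.datasets import load_wine
    from sklearn.neighbors import RadiusNeighborsTransformer

    X, _ = load_wine(return_X_y=True)

    # Neighbor search happens once; each DBSCAN fit only reads the sparse graph.
    # eps must stay below the transformer's radius for the graph to be complete.
    graph = RadiusNeighborsTransformer(radius=42.0, mode="distance").fit_transform(X)
    for eps in (20.0, 25.0, 30.0):
        labels = DBSCAN(eps=eps, metric="precomputed").fit_predict(graph)
        print(eps, np.unique(labels).size)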
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kd_tree.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (774 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_kde.py ADDED
@@ -0,0 +1,365 @@
1
+ """
2
+ Kernel Density Estimation
3
+ -------------------------
4
+ """
5
+ # Author: Jake Vanderplas <[email protected]>
6
+ import itertools
7
+ from numbers import Integral, Real
8
+
9
+ import numpy as np
10
+ from scipy.special import gammainc
11
+
12
+ from ..base import BaseEstimator, _fit_context
13
+ from ..neighbors._base import VALID_METRICS
14
+ from ..utils import check_random_state
15
+ from ..utils._param_validation import Interval, StrOptions
16
+ from ..utils.extmath import row_norms
17
+ from ..utils.validation import _check_sample_weight, check_is_fitted
18
+ from ._ball_tree import BallTree
19
+ from ._kd_tree import KDTree
20
+
21
+ VALID_KERNELS = [
22
+ "gaussian",
23
+ "tophat",
24
+ "epanechnikov",
25
+ "exponential",
26
+ "linear",
27
+ "cosine",
28
+ ]
29
+
30
+ TREE_DICT = {"ball_tree": BallTree, "kd_tree": KDTree}
31
+
32
+
33
+ # TODO: implement a brute force version for testing purposes
34
+ # TODO: create a density estimation base class?
35
+ class KernelDensity(BaseEstimator):
36
+ """Kernel Density Estimation.
37
+
38
+ Read more in the :ref:`User Guide <kernel_density>`.
39
+
40
+ Parameters
41
+ ----------
42
+ bandwidth : float or {"scott", "silverman"}, default=1.0
43
+ The bandwidth of the kernel. If bandwidth is a float, it defines the
44
+ bandwidth of the kernel. If bandwidth is a string, one of the estimation
45
+ methods is implemented.
46
+
47
+ algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'
48
+ The tree algorithm to use.
49
+
50
+ kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
51
+ 'cosine'}, default='gaussian'
52
+ The kernel to use.
53
+
54
+ metric : str, default='euclidean'
55
+ Metric to use for distance computation. See the
56
+ documentation of `scipy.spatial.distance
57
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
58
+ the metrics listed in
59
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
60
+ values.
61
+
62
+ Not all metrics are valid with all algorithms: refer to the
63
+ documentation of :class:`BallTree` and :class:`KDTree`. Note that the
64
+ normalization of the density output is correct only for the Euclidean
65
+ distance metric.
66
+
67
+ atol : float, default=0
68
+ The desired absolute tolerance of the result. A larger tolerance will
69
+ generally lead to faster execution.
70
+
71
+ rtol : float, default=0
72
+ The desired relative tolerance of the result. A larger tolerance will
73
+ generally lead to faster execution.
74
+
75
+ breadth_first : bool, default=True
76
+ If true (default), use a breadth-first approach to the problem.
77
+ Otherwise use a depth-first approach.
78
+
79
+ leaf_size : int, default=40
80
+ Specify the leaf size of the underlying tree. See :class:`BallTree`
81
+ or :class:`KDTree` for details.
82
+
83
+ metric_params : dict, default=None
84
+ Additional parameters to be passed to the tree for use with the
85
+ metric. For more information, see the documentation of
86
+ :class:`BallTree` or :class:`KDTree`.
87
+
88
+ Attributes
89
+ ----------
90
+ n_features_in_ : int
91
+ Number of features seen during :term:`fit`.
92
+
93
+ .. versionadded:: 0.24
94
+
95
+ tree_ : ``BinaryTree`` instance
96
+ The tree algorithm for fast generalized N-point problems.
97
+
98
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
99
+ Names of features seen during :term:`fit`. Defined only when `X`
100
+ has feature names that are all strings.
101
+
102
+ bandwidth_ : float
103
+ Value of the bandwidth, given directly by the bandwidth parameter or
104
+ estimated using the 'scott' or 'silverman' method.
105
+
106
+ .. versionadded:: 1.0
107
+
108
+ See Also
109
+ --------
110
+ sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point
111
+ problems.
112
+ sklearn.neighbors.BallTree : Ball tree for fast generalized N-point
113
+ problems.
114
+
115
+ Examples
116
+ --------
117
+ Compute a gaussian kernel density estimate with a fixed bandwidth.
118
+
119
+ >>> from sklearn.neighbors import KernelDensity
120
+ >>> import numpy as np
121
+ >>> rng = np.random.RandomState(42)
122
+ >>> X = rng.random_sample((100, 3))
123
+ >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
124
+ >>> log_density = kde.score_samples(X[:3])
125
+ >>> log_density
126
+ array([-1.52955942, -1.51462041, -1.60244657])
127
+ """
128
+
129
+ _parameter_constraints: dict = {
130
+ "bandwidth": [
131
+ Interval(Real, 0, None, closed="neither"),
132
+ StrOptions({"scott", "silverman"}),
133
+ ],
134
+ "algorithm": [StrOptions(set(TREE_DICT.keys()) | {"auto"})],
135
+ "kernel": [StrOptions(set(VALID_KERNELS))],
136
+ "metric": [
137
+ StrOptions(
138
+ set(itertools.chain(*[VALID_METRICS[alg] for alg in TREE_DICT.keys()]))
139
+ )
140
+ ],
141
+ "atol": [Interval(Real, 0, None, closed="left")],
142
+ "rtol": [Interval(Real, 0, None, closed="left")],
143
+ "breadth_first": ["boolean"],
144
+ "leaf_size": [Interval(Integral, 1, None, closed="left")],
145
+ "metric_params": [None, dict],
146
+ }
147
+
148
+ def __init__(
149
+ self,
150
+ *,
151
+ bandwidth=1.0,
152
+ algorithm="auto",
153
+ kernel="gaussian",
154
+ metric="euclidean",
155
+ atol=0,
156
+ rtol=0,
157
+ breadth_first=True,
158
+ leaf_size=40,
159
+ metric_params=None,
160
+ ):
161
+ self.algorithm = algorithm
162
+ self.bandwidth = bandwidth
163
+ self.kernel = kernel
164
+ self.metric = metric
165
+ self.atol = atol
166
+ self.rtol = rtol
167
+ self.breadth_first = breadth_first
168
+ self.leaf_size = leaf_size
169
+ self.metric_params = metric_params
170
+
171
+ def _choose_algorithm(self, algorithm, metric):
172
+ # given the algorithm string + metric string, choose the optimal
173
+ # algorithm to compute the result.
174
+ if algorithm == "auto":
175
+ # use KD Tree if possible
176
+ if metric in KDTree.valid_metrics:
177
+ return "kd_tree"
178
+ elif metric in BallTree.valid_metrics:
179
+ return "ball_tree"
180
+ else: # kd_tree or ball_tree
181
+ if metric not in TREE_DICT[algorithm].valid_metrics:
182
+ raise ValueError(
183
+ "invalid metric for {0}: '{1}'".format(TREE_DICT[algorithm], metric)
184
+ )
185
+ return algorithm
186
+
187
+ @_fit_context(
188
+ # KernelDensity.metric is not validated yet
189
+ prefer_skip_nested_validation=False
190
+ )
191
+ def fit(self, X, y=None, sample_weight=None):
192
+ """Fit the Kernel Density model on the data.
193
+
194
+ Parameters
195
+ ----------
196
+ X : array-like of shape (n_samples, n_features)
197
+ List of n_features-dimensional data points. Each row
198
+ corresponds to a single data point.
199
+
200
+ y : None
201
+ Ignored. This parameter exists only for compatibility with
202
+ :class:`~sklearn.pipeline.Pipeline`.
203
+
204
+ sample_weight : array-like of shape (n_samples,), default=None
205
+ List of sample weights attached to the data X.
206
+
207
+ .. versionadded:: 0.20
208
+
209
+ Returns
210
+ -------
211
+ self : object
212
+ Returns the instance itself.
213
+ """
214
+ algorithm = self._choose_algorithm(self.algorithm, self.metric)
215
+
216
+ if isinstance(self.bandwidth, str):
217
+ if self.bandwidth == "scott":
218
+ self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))
219
+ elif self.bandwidth == "silverman":
220
+ self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (
221
+ -1 / (X.shape[1] + 4)
222
+ )
223
+ else:
224
+ self.bandwidth_ = self.bandwidth
225
+
226
+ X = self._validate_data(X, order="C", dtype=np.float64)
227
+
228
+ if sample_weight is not None:
229
+ sample_weight = _check_sample_weight(
230
+ sample_weight, X, dtype=np.float64, only_non_negative=True
231
+ )
232
+
233
+ kwargs = self.metric_params
234
+ if kwargs is None:
235
+ kwargs = {}
236
+ self.tree_ = TREE_DICT[algorithm](
237
+ X,
238
+ metric=self.metric,
239
+ leaf_size=self.leaf_size,
240
+ sample_weight=sample_weight,
241
+ **kwargs,
242
+ )
243
+ return self
244
+
245
+ def score_samples(self, X):
246
+ """Compute the log-likelihood of each sample under the model.
247
+
248
+ Parameters
249
+ ----------
250
+ X : array-like of shape (n_samples, n_features)
251
+ An array of points to query. Last dimension should match dimension
252
+ of training data (n_features).
253
+
254
+ Returns
255
+ -------
256
+ density : ndarray of shape (n_samples,)
257
+ Log-likelihood of each sample in `X`. These are normalized to be
258
+ probability densities, so values will be low for high-dimensional
259
+ data.
260
+ """
261
+ check_is_fitted(self)
262
+ # The returned density is normalized to the number of points.
263
+ # For it to be a probability, we must scale it. For this reason
264
+ # we'll also scale atol.
265
+ X = self._validate_data(X, order="C", dtype=np.float64, reset=False)
266
+ if self.tree_.sample_weight is None:
267
+ N = self.tree_.data.shape[0]
268
+ else:
269
+ N = self.tree_.sum_weight
270
+ atol_N = self.atol * N
271
+ log_density = self.tree_.kernel_density(
272
+ X,
273
+ h=self.bandwidth_,
274
+ kernel=self.kernel,
275
+ atol=atol_N,
276
+ rtol=self.rtol,
277
+ breadth_first=self.breadth_first,
278
+ return_log=True,
279
+ )
280
+ log_density -= np.log(N)
281
+ return log_density
282
+
283
+ def score(self, X, y=None):
284
+ """Compute the total log-likelihood under the model.
285
+
286
+ Parameters
287
+ ----------
288
+ X : array-like of shape (n_samples, n_features)
289
+ List of n_features-dimensional data points. Each row
290
+ corresponds to a single data point.
291
+
292
+ y : None
293
+ Ignored. This parameter exists only for compatibility with
294
+ :class:`~sklearn.pipeline.Pipeline`.
295
+
296
+ Returns
297
+ -------
298
+ logprob : float
299
+ Total log-likelihood of the data in X. This is normalized to be a
300
+ probability density, so the value will be low for high-dimensional
301
+ data.
302
+ """
303
+ return np.sum(self.score_samples(X))
304
+
305
+ def sample(self, n_samples=1, random_state=None):
306
+ """Generate random samples from the model.
307
+
308
+ Currently, this is implemented only for gaussian and tophat kernels.
309
+
310
+ Parameters
311
+ ----------
312
+ n_samples : int, default=1
313
+ Number of samples to generate.
314
+
315
+ random_state : int, RandomState instance or None, default=None
316
+ Determines random number generation used to generate
317
+ random samples. Pass an int for reproducible results
318
+ across multiple function calls.
319
+ See :term:`Glossary <random_state>`.
320
+
321
+ Returns
322
+ -------
323
+ X : array-like of shape (n_samples, n_features)
324
+ List of samples.
325
+ """
326
+ check_is_fitted(self)
327
+ # TODO: implement sampling for other valid kernel shapes
328
+ if self.kernel not in ["gaussian", "tophat"]:
329
+ raise NotImplementedError()
330
+
331
+ data = np.asarray(self.tree_.data)
332
+
333
+ rng = check_random_state(random_state)
334
+ u = rng.uniform(0, 1, size=n_samples)
335
+ if self.tree_.sample_weight is None:
336
+ i = (u * data.shape[0]).astype(np.int64)
337
+ else:
338
+ cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
339
+ sum_weight = cumsum_weight[-1]
340
+ i = np.searchsorted(cumsum_weight, u * sum_weight)
341
+ if self.kernel == "gaussian":
342
+ return np.atleast_2d(rng.normal(data[i], self.bandwidth_))
343
+
344
+ elif self.kernel == "tophat":
345
+ # we first draw points from a d-dimensional normal distribution,
346
+ # then use an incomplete gamma function to map them to a uniform
347
+ # d-dimensional tophat distribution.
348
+ dim = data.shape[1]
349
+ X = rng.normal(size=(n_samples, dim))
350
+ s_sq = row_norms(X, squared=True)
351
+ correction = (
352
+ gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
353
+ * self.bandwidth_
354
+ / np.sqrt(s_sq)
355
+ )
356
+ return data[i] + X * correction[:, np.newaxis]
357
+
358
+ def _more_tags(self):
359
+ return {
360
+ "_xfail_checks": {
361
+ "check_sample_weights_invariance": (
362
+ "sample_weight must have positive values"
363
+ ),
364
+ }
365
+ }
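Because `KernelDensity.score` returns the total log-likelihood, the bandwidth can be chosen by cross-validated grid search. A minimal sketch using only the public API shown above, with synthetic data for illustration (not part of the uploaded file):

    import numpy as np
    from sklearn.model_selection import GridSearchCV
    from sklearn.neighbors import KernelDensity

    rng = np.random.RandomState(42)
    X = rng.normal(size=(200, 1))

    # GridSearchCV maximizes the estimator's score, i.e. held-out log-likelihood.
    search = GridSearchCV(
        KernelDensity(kernel="gaussian"),
        {"bandwidth": np.logspace(-1, 0, 10)},
        cv=5,
    )
    search.fit(X)
    print(search.best_params_)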
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_lof.py ADDED
@@ -0,0 +1,516 @@
1
+ # Authors: Nicolas Goix <[email protected]>
2
+ # Alexandre Gramfort <[email protected]>
3
+ # License: BSD 3 clause
4
+
5
+ import warnings
6
+ from numbers import Real
7
+
8
+ import numpy as np
9
+
10
+ from ..base import OutlierMixin, _fit_context
11
+ from ..utils import check_array
12
+ from ..utils._param_validation import Interval, StrOptions
13
+ from ..utils.metaestimators import available_if
14
+ from ..utils.validation import check_is_fitted
15
+ from ._base import KNeighborsMixin, NeighborsBase
16
+
17
+ __all__ = ["LocalOutlierFactor"]
18
+
19
+
20
+ class LocalOutlierFactor(KNeighborsMixin, OutlierMixin, NeighborsBase):
21
+ """Unsupervised Outlier Detection using the Local Outlier Factor (LOF).
22
+
23
+ The anomaly score of each sample is called the Local Outlier Factor.
24
+ It measures the local deviation of the density of a given sample with respect
25
+ to its neighbors.
26
+ It is local in that the anomaly score depends on how isolated the object
27
+ is with respect to the surrounding neighborhood.
28
+ More precisely, locality is given by k-nearest neighbors, whose distance
29
+ is used to estimate the local density.
30
+ By comparing the local density of a sample to the local densities of its
31
+ neighbors, one can identify samples that have a substantially lower density
32
+ than their neighbors. These are considered outliers.
33
+
34
+ .. versionadded:: 0.19
35
+
36
+ Parameters
37
+ ----------
38
+ n_neighbors : int, default=20
39
+ Number of neighbors to use by default for :meth:`kneighbors` queries.
40
+ If n_neighbors is larger than the number of samples provided,
41
+ all samples will be used.
42
+
43
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
44
+ Algorithm used to compute the nearest neighbors:
45
+
46
+ - 'ball_tree' will use :class:`BallTree`
47
+ - 'kd_tree' will use :class:`KDTree`
48
+ - 'brute' will use a brute-force search.
49
+ - 'auto' will attempt to decide the most appropriate algorithm
50
+ based on the values passed to :meth:`fit` method.
51
+
52
+ Note: fitting on sparse input will override the setting of
53
+ this parameter, using brute force.
54
+
55
+ leaf_size : int, default=30
56
+ Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
57
+ affect the speed of the construction and query, as well as the memory
58
+ required to store the tree. The optimal value depends on the
59
+ nature of the problem.
60
+
61
+ metric : str or callable, default='minkowski'
62
+ Metric to use for distance computation. Default is "minkowski", which
63
+ results in the standard Euclidean distance when p = 2. See the
64
+ documentation of `scipy.spatial.distance
65
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
66
+ the metrics listed in
67
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
68
+ values.
69
+
70
+ If metric is "precomputed", X is assumed to be a distance matrix and
71
+ must be square during fit. X may be a :term:`sparse graph`, in which
72
+ case only "nonzero" elements may be considered neighbors.
73
+
74
+ If metric is a callable function, it takes two arrays representing 1D
75
+ vectors as inputs and must return one value indicating the distance
76
+ between those vectors. This works for Scipy's metrics, but is less
77
+ efficient than passing the metric name as a string.
78
+
79
+ p : float, default=2
80
+ Parameter for the Minkowski metric from
81
+ :func:`sklearn.metrics.pairwise_distances`. When p = 1, this
82
+ is equivalent to using manhattan_distance (l1), and euclidean_distance
83
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
84
+
85
+ metric_params : dict, default=None
86
+ Additional keyword arguments for the metric function.
87
+
88
+ contamination : 'auto' or float, default='auto'
89
+ The amount of contamination of the data set, i.e. the proportion
90
+ of outliers in the data set. When fitting this is used to define the
91
+ threshold on the scores of the samples.
92
+
93
+ - if 'auto', the threshold is determined as in the
94
+ original paper,
95
+ - if a float, the contamination should be in the range (0, 0.5].
96
+
97
+ .. versionchanged:: 0.22
98
+ The default value of ``contamination`` changed from 0.1
99
+ to ``'auto'``.
100
+
101
+ novelty : bool, default=False
102
+ By default, LocalOutlierFactor is only meant to be used for outlier
103
+ detection (novelty=False). Set novelty to True if you want to use
104
+ LocalOutlierFactor for novelty detection. In this case be aware that
105
+ you should only use predict, decision_function and score_samples
106
+ on new unseen data and not on the training set; and note that the
107
+ results obtained this way may differ from the standard LOF results.
108
+
109
+ .. versionadded:: 0.20
110
+
111
+ n_jobs : int, default=None
112
+ The number of parallel jobs to run for neighbors search.
113
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
114
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
115
+ for more details.
116
+
117
+ Attributes
118
+ ----------
119
+ negative_outlier_factor_ : ndarray of shape (n_samples,)
120
+ The opposite LOF of the training samples. The higher, the more normal.
121
+ Inliers tend to have a LOF score close to 1
122
+ (``negative_outlier_factor_`` close to -1), while outliers tend to have
123
+ a larger LOF score.
124
+
125
+ The local outlier factor (LOF) of a sample captures its
126
+ supposed 'degree of abnormality'.
127
+ It is the average of the ratio of the local reachability density of
128
+ a sample and those of its k-nearest neighbors.
129
+
130
+ n_neighbors_ : int
131
+ The actual number of neighbors used for :meth:`kneighbors` queries.
132
+
133
+ offset_ : float
134
+ Offset used to obtain binary labels from the raw scores.
135
+ Observations having a negative_outlier_factor smaller than `offset_`
136
+ are detected as abnormal.
137
+ The offset is set to -1.5 (inliers score around -1), except when a
138
+ contamination parameter different than "auto" is provided. In that
139
+ case, the offset is defined in such a way we obtain the expected
140
+ number of outliers in training.
141
+
142
+ .. versionadded:: 0.20
143
+
144
+ effective_metric_ : str
145
+ The effective metric used for the distance computation.
146
+
147
+ effective_metric_params_ : dict
148
+ The effective additional keyword arguments for the metric function.
149
+
150
+ n_features_in_ : int
151
+ Number of features seen during :term:`fit`.
152
+
153
+ .. versionadded:: 0.24
154
+
155
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
156
+ Names of features seen during :term:`fit`. Defined only when `X`
157
+ has feature names that are all strings.
158
+
159
+ .. versionadded:: 1.0
160
+
161
+ n_samples_fit_ : int
162
+ It is the number of samples in the fitted data.
163
+
164
+ See Also
165
+ --------
166
+ sklearn.svm.OneClassSVM: Unsupervised Outlier Detection using
167
+ Support Vector Machine.
168
+
169
+ References
170
+ ----------
171
+ .. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
172
+ LOF: identifying density-based local outliers. In ACM sigmod record.
173
+
174
+ Examples
175
+ --------
176
+ >>> import numpy as np
177
+ >>> from sklearn.neighbors import LocalOutlierFactor
178
+ >>> X = [[-1.1], [0.2], [101.1], [0.3]]
179
+ >>> clf = LocalOutlierFactor(n_neighbors=2)
180
+ >>> clf.fit_predict(X)
181
+ array([ 1, 1, -1, 1])
182
+ >>> clf.negative_outlier_factor_
183
+ array([ -0.9821..., -1.0370..., -73.3697..., -0.9821...])
184
+ """
185
+
186
+ _parameter_constraints: dict = {
187
+ **NeighborsBase._parameter_constraints,
188
+ "contamination": [
189
+ StrOptions({"auto"}),
190
+ Interval(Real, 0, 0.5, closed="right"),
191
+ ],
192
+ "novelty": ["boolean"],
193
+ }
194
+ _parameter_constraints.pop("radius")
195
+
196
+ def __init__(
197
+ self,
198
+ n_neighbors=20,
199
+ *,
200
+ algorithm="auto",
201
+ leaf_size=30,
202
+ metric="minkowski",
203
+ p=2,
204
+ metric_params=None,
205
+ contamination="auto",
206
+ novelty=False,
207
+ n_jobs=None,
208
+ ):
209
+ super().__init__(
210
+ n_neighbors=n_neighbors,
211
+ algorithm=algorithm,
212
+ leaf_size=leaf_size,
213
+ metric=metric,
214
+ p=p,
215
+ metric_params=metric_params,
216
+ n_jobs=n_jobs,
217
+ )
218
+ self.contamination = contamination
219
+ self.novelty = novelty
220
+
221
+ def _check_novelty_fit_predict(self):
222
+ if self.novelty:
223
+ msg = (
224
+ "fit_predict is not available when novelty=True. Use "
225
+ "novelty=False if you want to predict on the training set."
226
+ )
227
+ raise AttributeError(msg)
228
+ return True
229
+
230
+ @available_if(_check_novelty_fit_predict)
231
+ def fit_predict(self, X, y=None):
232
+ """Fit the model to the training set X and return the labels.
233
+
234
+ **Not available for novelty detection (when novelty is set to True).**
235
+ Label is 1 for an inlier and -1 for an outlier according to the LOF
236
+ score and the contamination parameter.
237
+
238
+ Parameters
239
+ ----------
240
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
241
+ The query sample or samples to compute the Local Outlier Factor
242
+ w.r.t. the training samples.
243
+
244
+ y : Ignored
245
+ Not used, present for API consistency by convention.
246
+
247
+ Returns
248
+ -------
249
+ is_inlier : ndarray of shape (n_samples,)
250
+ Returns -1 for anomalies/outliers and 1 for inliers.
251
+ """
252
+
253
+ # As fit_predict would be different from fit.predict, fit_predict is
254
+ # only available for outlier detection (novelty=False)
255
+
256
+ return self.fit(X)._predict()
257
+
258
+ @_fit_context(
259
+ # LocalOutlierFactor.metric is not validated yet
260
+ prefer_skip_nested_validation=False
261
+ )
262
+ def fit(self, X, y=None):
263
+ """Fit the local outlier factor detector from the training dataset.
264
+
265
+ Parameters
266
+ ----------
267
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
268
+ (n_samples, n_samples) if metric='precomputed'
269
+ Training data.
270
+
271
+ y : Ignored
272
+ Not used, present for API consistency by convention.
273
+
274
+ Returns
275
+ -------
276
+ self : LocalOutlierFactor
277
+ The fitted local outlier factor detector.
278
+ """
279
+ self._fit(X)
280
+
281
+ n_samples = self.n_samples_fit_
282
+ if self.n_neighbors > n_samples:
283
+ warnings.warn(
284
+ "n_neighbors (%s) is greater than the "
285
+ "total number of samples (%s). n_neighbors "
286
+ "will be set to (n_samples - 1) for estimation."
287
+ % (self.n_neighbors, n_samples)
288
+ )
289
+ self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
290
+
291
+ self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
292
+ n_neighbors=self.n_neighbors_
293
+ )
294
+
295
+ if self._fit_X.dtype == np.float32:
296
+ self._distances_fit_X_ = self._distances_fit_X_.astype(
297
+ self._fit_X.dtype,
298
+ copy=False,
299
+ )
300
+
301
+ self._lrd = self._local_reachability_density(
302
+ self._distances_fit_X_, _neighbors_indices_fit_X_
303
+ )
304
+
305
+ # Compute lof score over training samples to define offset_:
306
+ lrd_ratios_array = (
307
+ self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
308
+ )
309
+
310
+ self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
311
+
312
+ if self.contamination == "auto":
313
+ # inliers score around -1 (the higher, the less abnormal).
314
+ self.offset_ = -1.5
315
+ else:
316
+ self.offset_ = np.percentile(
317
+ self.negative_outlier_factor_, 100.0 * self.contamination
318
+ )
319
+
320
+ return self
321
+
322
+ def _check_novelty_predict(self):
323
+ if not self.novelty:
324
+ msg = (
325
+ "predict is not available when novelty=False, use "
326
+ "fit_predict if you want to predict on training data. Use "
327
+ "novelty=True if you want to use LOF for novelty detection "
328
+ "and predict on new unseen data."
329
+ )
330
+ raise AttributeError(msg)
331
+ return True
332
+
333
+ @available_if(_check_novelty_predict)
334
+ def predict(self, X=None):
335
+ """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
336
+
337
+ **Only available for novelty detection (when novelty is set to True).**
338
+ This method makes it possible to generalize prediction to *new observations* (not
339
+ in the training set). Note that the result of ``clf.fit(X)`` then
340
+ ``clf.predict(X)`` with ``novelty=True`` may differ from the result
341
+ obtained by ``clf.fit_predict(X)`` with ``novelty=False``.
342
+
343
+ Parameters
344
+ ----------
345
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
346
+ The query sample or samples to compute the Local Outlier Factor
347
+ w.r.t. the training samples.
348
+
349
+ Returns
350
+ -------
351
+ is_inlier : ndarray of shape (n_samples,)
352
+ Returns -1 for anomalies/outliers and +1 for inliers.
353
+ """
354
+ return self._predict(X)
355
+
356
+ def _predict(self, X=None):
357
+ """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
358
+
359
+ If X is None, returns the same as fit_predict(X_train).
360
+
361
+ Parameters
362
+ ----------
363
+ X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
364
+ The query sample or samples to compute the Local Outlier Factor
365
+ w.r.t. the training samples. If None, makes prediction on the
366
+ training data without considering them as their own neighbors.
367
+
368
+ Returns
369
+ -------
370
+ is_inlier : ndarray of shape (n_samples,)
371
+ Returns -1 for anomalies/outliers and +1 for inliers.
372
+ """
373
+ check_is_fitted(self)
374
+
375
+ if X is not None:
376
+ X = check_array(X, accept_sparse="csr")
377
+ is_inlier = np.ones(X.shape[0], dtype=int)
378
+ is_inlier[self.decision_function(X) < 0] = -1
379
+ else:
380
+ is_inlier = np.ones(self.n_samples_fit_, dtype=int)
381
+ is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
382
+
383
+ return is_inlier
384
+
385
+ def _check_novelty_decision_function(self):
386
+ if not self.novelty:
387
+ msg = (
388
+ "decision_function is not available when novelty=False. "
389
+ "Use novelty=True if you want to use LOF for novelty "
390
+ "detection and compute decision_function for new unseen "
391
+ "data. Note that the opposite LOF of the training samples "
392
+ "is always available by considering the "
393
+ "negative_outlier_factor_ attribute."
394
+ )
395
+ raise AttributeError(msg)
396
+ return True
397
+
398
+ @available_if(_check_novelty_decision_function)
399
+ def decision_function(self, X):
400
+ """Shifted opposite of the Local Outlier Factor of X.
401
+
402
+ Bigger is better, i.e. large values correspond to inliers.
403
+
404
+ **Only available for novelty detection (when novelty is set to True).**
405
+ The shift offset allows a zero threshold for being an outlier.
406
+ The argument X is supposed to contain *new data*: if X contains a
407
+ point from training, it considers the latter in its own neighborhood.
408
+ Also, the samples in X are not considered in the neighborhood of any
409
+ point.
410
+
411
+ Parameters
412
+ ----------
413
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
414
+ The query sample or samples to compute the Local Outlier Factor
415
+ w.r.t. the training samples.
416
+
417
+ Returns
418
+ -------
419
+ shifted_opposite_lof_scores : ndarray of shape (n_samples,)
420
+ The shifted opposite of the Local Outlier Factor of each input
421
+ samples. The lower, the more abnormal. Negative scores represent
422
+ outliers, positive scores represent inliers.
423
+ """
424
+ return self.score_samples(X) - self.offset_
425
+
426
+ def _check_novelty_score_samples(self):
427
+ if not self.novelty:
428
+ msg = (
429
+ "score_samples is not available when novelty=False. The "
430
+ "scores of the training samples are always available "
431
+ "through the negative_outlier_factor_ attribute. Use "
432
+ "novelty=True if you want to use LOF for novelty detection "
433
+ "and compute score_samples for new unseen data."
434
+ )
435
+ raise AttributeError(msg)
436
+ return True
437
+
438
+ @available_if(_check_novelty_score_samples)
439
+ def score_samples(self, X):
440
+ """Opposite of the Local Outlier Factor of X.
441
+
442
+ The opposite is returned so that bigger is better, i.e. large values correspond
443
+ to inliers.
444
+
445
+ **Only available for novelty detection (when novelty is set to True).**
446
+ The argument X is supposed to contain *new data*: if X contains a
447
+ point from training, it considers the latter in its own neighborhood.
448
+ Also, the samples in X are not considered in the neighborhood of any
449
+ point. Because of this, the scores obtained via ``score_samples`` may
450
+ differ from the standard LOF scores.
451
+ The standard LOF scores for the training data is available via the
452
+ ``negative_outlier_factor_`` attribute.
453
+
454
+ Parameters
455
+ ----------
456
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
457
+ The query sample or samples to compute the Local Outlier Factor
458
+ w.r.t. the training samples.
459
+
460
+ Returns
461
+ -------
462
+ opposite_lof_scores : ndarray of shape (n_samples,)
463
+ The opposite of the Local Outlier Factor of each input samples.
464
+ The lower, the more abnormal.
465
+ """
466
+ check_is_fitted(self)
467
+ X = check_array(X, accept_sparse="csr")
468
+
469
+ distances_X, neighbors_indices_X = self.kneighbors(
470
+ X, n_neighbors=self.n_neighbors_
471
+ )
472
+
473
+ if X.dtype == np.float32:
474
+ distances_X = distances_X.astype(X.dtype, copy=False)
475
+
476
+ X_lrd = self._local_reachability_density(
477
+ distances_X,
478
+ neighbors_indices_X,
479
+ )
480
+
481
+ lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
482
+
483
+ # as bigger is better:
484
+ return -np.mean(lrd_ratios_array, axis=1)
485
+
486
+ def _local_reachability_density(self, distances_X, neighbors_indices):
487
+ """The local reachability density (LRD)
488
+
489
+ The LRD of a sample is the inverse of the average reachability
490
+ distance of its k-nearest neighbors.
491
+
492
+ Parameters
493
+ ----------
494
+ distances_X : ndarray of shape (n_queries, self.n_neighbors)
495
+ Distances to the neighbors (in the training samples `self._fit_X`)
496
+ of each query point to compute the LRD.
497
+
498
+ neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
499
+ Neighbors indices (of each query point) among training samples
500
+ self._fit_X.
501
+
502
+ Returns
503
+ -------
504
+ local_reachability_density : ndarray of shape (n_queries,)
505
+ The local reachability density of each sample.
506
+ """
507
+ dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]
508
+ reach_dist_array = np.maximum(distances_X, dist_k)
509
+
510
+ # 1e-10 to avoid `nan` when the number of duplicates > n_neighbors_:
511
+ return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
512
+
513
+ def _more_tags(self):
514
+ return {
515
+ "preserves_dtype": [np.float64, np.float32],
516
+ }
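A hedged sketch of the novelty-detection mode discussed in the docstrings above: with `novelty=True`, `predict`, `decision_function` and `score_samples` become available and should only be applied to data not used for fitting. Synthetic data, illustration only; not part of the uploaded file.

    import numpy as np
    from sklearn.neighbors import LocalOutlierFactor

    rng = np.random.RandomState(42)
    X_train = rng.normal(size=(100, 2))
    X_new = np.array([[0.1, 0.0], [6.0, 6.0]])

    # Fit on training data only; query previously unseen points.
    lof = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X_train)
    print(lof.predict(X_new))            # 1 = inlier, -1 = outlier
    print(lof.decision_function(X_new))  # negative values flag outliers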
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nca.py ADDED
@@ -0,0 +1,525 @@
1
+ """
2
+ Neighborhood Component Analysis
3
+ """
4
+
5
+ # Authors: William de Vazelhes <[email protected]>
6
+ # John Chiotellis <[email protected]>
7
+ # License: BSD 3 clause
8
+
9
+ import sys
10
+ import time
11
+ from numbers import Integral, Real
12
+ from warnings import warn
13
+
14
+ import numpy as np
15
+ from scipy.optimize import minimize
16
+
17
+ from ..base import (
18
+ BaseEstimator,
19
+ ClassNamePrefixFeaturesOutMixin,
20
+ TransformerMixin,
21
+ _fit_context,
22
+ )
23
+ from ..decomposition import PCA
24
+ from ..exceptions import ConvergenceWarning
25
+ from ..metrics import pairwise_distances
26
+ from ..preprocessing import LabelEncoder
27
+ from ..utils._param_validation import Interval, StrOptions
28
+ from ..utils.extmath import softmax
29
+ from ..utils.multiclass import check_classification_targets
30
+ from ..utils.random import check_random_state
31
+ from ..utils.validation import check_array, check_is_fitted
32
+
33
+
34
+ class NeighborhoodComponentsAnalysis(
35
+ ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
36
+ ):
37
+ """Neighborhood Components Analysis.
38
+
39
+ Neighborhood Component Analysis (NCA) is a machine learning algorithm for
40
+ metric learning. It learns a linear transformation in a supervised fashion
41
+ to improve the classification accuracy of a stochastic nearest neighbors
42
+ rule in the transformed space.
43
+
44
+ Read more in the :ref:`User Guide <nca>`.
45
+
46
+ Parameters
47
+ ----------
48
+ n_components : int, default=None
49
+ Preferred dimensionality of the projected space.
50
+ If None it will be set to `n_features`.
51
+
52
+ init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \
53
+ (n_features_a, n_features_b), default='auto'
54
+ Initialization of the linear transformation. Possible options are
55
+ `'auto'`, `'pca'`, `'lda'`, `'identity'`, `'random'`, and a numpy
56
+ array of shape `(n_features_a, n_features_b)`.
57
+
58
+ - `'auto'`
59
+ Depending on `n_components`, the most reasonable initialization
60
+ will be chosen. If `n_components <= n_classes` we use `'lda'`, as
61
+ it uses labels information. If not, but
62
+ `n_components < min(n_features, n_samples)`, we use `'pca'`, as
63
+ it projects data in meaningful directions (those of higher
64
+ variance). Otherwise, we just use `'identity'`.
65
+
66
+ - `'pca'`
67
+ `n_components` principal components of the inputs passed
68
+ to :meth:`fit` will be used to initialize the transformation.
69
+ (See :class:`~sklearn.decomposition.PCA`)
70
+
71
+ - `'lda'`
72
+ `min(n_components, n_classes)` most discriminative
73
+ components of the inputs passed to :meth:`fit` will be used to
74
+ initialize the transformation. (If `n_components > n_classes`,
75
+ the rest of the components will be zero.) (See
76
+ :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)
77
+
78
+ - `'identity'`
79
+ If `n_components` is strictly smaller than the
80
+ dimensionality of the inputs passed to :meth:`fit`, the identity
81
+ matrix will be truncated to the first `n_components` rows.
82
+
83
+ - `'random'`
84
+ The initial transformation will be a random array of shape
85
+ `(n_components, n_features)`. Each value is sampled from the
86
+ standard normal distribution.
87
+
88
+ - numpy array
89
+ `n_features_b` must match the dimensionality of the inputs passed
90
+ to :meth:`fit` and n_features_a must be less than or equal to that.
91
+ If `n_components` is not `None`, `n_features_a` must match it.
92
+
93
+ warm_start : bool, default=False
94
+ If `True` and :meth:`fit` has been called before, the solution of the
95
+ previous call to :meth:`fit` is used as the initial linear
96
+ transformation (`n_components` and `init` will be ignored).
97
+
98
+ max_iter : int, default=50
99
+ Maximum number of iterations in the optimization.
100
+
101
+ tol : float, default=1e-5
102
+ Convergence tolerance for the optimization.
103
+
104
+ callback : callable, default=None
105
+ If not `None`, this function is called after every iteration of the
106
+ optimizer, taking as arguments the current solution (flattened
107
+ transformation matrix) and the number of iterations. This might be
108
+ useful in case one wants to examine or store the transformation
109
+ found after each iteration.
110
+
111
+ verbose : int, default=0
112
+ If 0, no progress messages will be printed.
113
+ If 1, progress messages will be printed to stdout.
114
+ If > 1, progress messages will be printed and the `disp`
115
+ parameter of :func:`scipy.optimize.minimize` will be set to
116
+ `verbose - 2`.
117
+
118
+ random_state : int or numpy.RandomState, default=None
119
+ A pseudo random number generator object or a seed for it if int. If
120
+ `init='random'`, `random_state` is used to initialize the random
121
+ transformation. If `init='pca'`, `random_state` is passed as an
122
+ argument to PCA when initializing the transformation. Pass an int
123
+ for reproducible results across multiple function calls.
124
+ See :term:`Glossary <random_state>`.
125
+
126
+ Attributes
127
+ ----------
128
+ components_ : ndarray of shape (n_components, n_features)
129
+ The linear transformation learned during fitting.
130
+
131
+ n_features_in_ : int
132
+ Number of features seen during :term:`fit`.
133
+
134
+ .. versionadded:: 0.24
135
+
136
+ n_iter_ : int
137
+ Counts the number of iterations performed by the optimizer.
138
+
139
+ random_state_ : numpy.RandomState
140
+ Pseudo random number generator object used during initialization.
141
+
142
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
143
+ Names of features seen during :term:`fit`. Defined only when `X`
144
+ has feature names that are all strings.
145
+
146
+ .. versionadded:: 1.0
147
+
148
+ See Also
149
+ --------
150
+ sklearn.discriminant_analysis.LinearDiscriminantAnalysis : Linear
151
+ Discriminant Analysis.
152
+ sklearn.decomposition.PCA : Principal component analysis (PCA).
153
+
154
+ References
155
+ ----------
156
+ .. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov.
157
+ "Neighbourhood Components Analysis". Advances in Neural Information
158
+ Processing Systems. 17, 513-520, 2005.
159
+ http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf
160
+
161
+ .. [2] Wikipedia entry on Neighborhood Components Analysis
162
+ https://en.wikipedia.org/wiki/Neighbourhood_components_analysis
163
+
164
+ Examples
165
+ --------
166
+ >>> from sklearn.neighbors import NeighborhoodComponentsAnalysis
167
+ >>> from sklearn.neighbors import KNeighborsClassifier
168
+ >>> from sklearn.datasets import load_iris
169
+ >>> from sklearn.model_selection import train_test_split
170
+ >>> X, y = load_iris(return_X_y=True)
171
+ >>> X_train, X_test, y_train, y_test = train_test_split(X, y,
172
+ ... stratify=y, test_size=0.7, random_state=42)
173
+ >>> nca = NeighborhoodComponentsAnalysis(random_state=42)
174
+ >>> nca.fit(X_train, y_train)
175
+ NeighborhoodComponentsAnalysis(...)
176
+ >>> knn = KNeighborsClassifier(n_neighbors=3)
177
+ >>> knn.fit(X_train, y_train)
178
+ KNeighborsClassifier(...)
179
+ >>> print(knn.score(X_test, y_test))
180
+ 0.933333...
181
+ >>> knn.fit(nca.transform(X_train), y_train)
182
+ KNeighborsClassifier(...)
183
+ >>> print(knn.score(nca.transform(X_test), y_test))
184
+ 0.961904...
185
+ """
186
+
187
+ _parameter_constraints: dict = {
188
+ "n_components": [
189
+ Interval(Integral, 1, None, closed="left"),
190
+ None,
191
+ ],
192
+ "init": [
193
+ StrOptions({"auto", "pca", "lda", "identity", "random"}),
194
+ np.ndarray,
195
+ ],
196
+ "warm_start": ["boolean"],
197
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
198
+ "tol": [Interval(Real, 0, None, closed="left")],
199
+ "callback": [callable, None],
200
+ "verbose": ["verbose"],
201
+ "random_state": ["random_state"],
202
+ }
203
+
204
+ def __init__(
205
+ self,
206
+ n_components=None,
207
+ *,
208
+ init="auto",
209
+ warm_start=False,
210
+ max_iter=50,
211
+ tol=1e-5,
212
+ callback=None,
213
+ verbose=0,
214
+ random_state=None,
215
+ ):
216
+ self.n_components = n_components
217
+ self.init = init
218
+ self.warm_start = warm_start
219
+ self.max_iter = max_iter
220
+ self.tol = tol
221
+ self.callback = callback
222
+ self.verbose = verbose
223
+ self.random_state = random_state
224
+
225
+ @_fit_context(prefer_skip_nested_validation=True)
226
+ def fit(self, X, y):
227
+ """Fit the model according to the given training data.
228
+
229
+ Parameters
230
+ ----------
231
+ X : array-like of shape (n_samples, n_features)
232
+ The training samples.
233
+
234
+ y : array-like of shape (n_samples,)
235
+ The corresponding training labels.
236
+
237
+ Returns
238
+ -------
239
+ self : object
240
+ Fitted estimator.
241
+ """
242
+ # Validate the inputs X and y, and convert y to numerical classes.
243
+ X, y = self._validate_data(X, y, ensure_min_samples=2)
244
+ check_classification_targets(y)
245
+ y = LabelEncoder().fit_transform(y)
246
+
247
+ # Check the preferred dimensionality of the projected space
248
+ if self.n_components is not None and self.n_components > X.shape[1]:
249
+ raise ValueError(
250
+ "The preferred dimensionality of the "
251
+ f"projected space `n_components` ({self.n_components}) cannot "
252
+ "be greater than the given data "
253
+ f"dimensionality ({X.shape[1]})!"
254
+ )
255
+ # If warm_start is enabled, check that the inputs are consistent
256
+ if (
257
+ self.warm_start
258
+ and hasattr(self, "components_")
259
+ and self.components_.shape[1] != X.shape[1]
260
+ ):
261
+ raise ValueError(
262
+ f"The new inputs dimensionality ({X.shape[1]}) does not "
263
+ "match the input dimensionality of the "
264
+ f"previously learned transformation ({self.components_.shape[1]})."
265
+ )
266
+ # Check how the linear transformation should be initialized
267
+ init = self.init
268
+ if isinstance(init, np.ndarray):
269
+ init = check_array(init)
270
+ # Assert that init.shape[1] = X.shape[1]
271
+ if init.shape[1] != X.shape[1]:
272
+ raise ValueError(
273
+ f"The input dimensionality ({init.shape[1]}) of the given "
274
+ "linear transformation `init` must match the "
275
+ f"dimensionality of the given inputs `X` ({X.shape[1]})."
276
+ )
277
+ # Assert that init.shape[0] <= init.shape[1]
278
+ if init.shape[0] > init.shape[1]:
279
+ raise ValueError(
280
+ f"The output dimensionality ({init.shape[0]}) of the given "
281
+ "linear transformation `init` cannot be "
282
+ f"greater than its input dimensionality ({init.shape[1]})."
283
+ )
284
+ # Assert that self.n_components = init.shape[0]
285
+ if self.n_components is not None and self.n_components != init.shape[0]:
286
+ raise ValueError(
287
+ "The preferred dimensionality of the "
288
+ f"projected space `n_components` ({self.n_components}) does"
289
+ " not match the output dimensionality of "
290
+ "the given linear transformation "
291
+ f"`init` ({init.shape[0]})!"
292
+ )
293
+
294
+ # Initialize the random generator
295
+ self.random_state_ = check_random_state(self.random_state)
296
+
297
+ # Measure the total training time
298
+ t_train = time.time()
299
+
300
+ # Compute a mask that stays fixed during optimization:
301
+ same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
302
+ # (n_samples, n_samples)
303
+
304
+ # Initialize the transformation
305
+ transformation = np.ravel(self._initialize(X, y, init))
306
+
307
+ # Create a dictionary of parameters to be passed to the optimizer
308
+ disp = self.verbose - 2 if self.verbose > 1 else -1
309
+ optimizer_params = {
310
+ "method": "L-BFGS-B",
311
+ "fun": self._loss_grad_lbfgs,
312
+ "args": (X, same_class_mask, -1.0),
313
+ "jac": True,
314
+ "x0": transformation,
315
+ "tol": self.tol,
316
+ "options": dict(maxiter=self.max_iter, disp=disp),
317
+ "callback": self._callback,
318
+ }
319
+
320
+ # Call the optimizer
321
+ self.n_iter_ = 0
322
+ opt_result = minimize(**optimizer_params)
323
+
324
+ # Reshape the solution found by the optimizer
325
+ self.components_ = opt_result.x.reshape(-1, X.shape[1])
326
+ self._n_features_out = self.components_.shape[1]
327
+
328
+ # Stop timer
329
+ t_train = time.time() - t_train
330
+ if self.verbose:
331
+ cls_name = self.__class__.__name__
332
+
333
+ # Warn the user if the algorithm did not converge
334
+ if not opt_result.success:
335
+ warn(
336
+ "[{}] NCA did not converge: {}".format(
337
+ cls_name, opt_result.message
338
+ ),
339
+ ConvergenceWarning,
340
+ )
341
+
342
+ print("[{}] Training took {:8.2f}s.".format(cls_name, t_train))
343
+
344
+ return self
345
+
346
+ def transform(self, X):
347
+ """Apply the learned transformation to the given data.
348
+
349
+ Parameters
350
+ ----------
351
+ X : array-like of shape (n_samples, n_features)
352
+ Data samples.
353
+
354
+ Returns
355
+ -------
356
+ X_embedded : ndarray of shape (n_samples, n_components)
357
+ The data samples transformed.
358
+
359
+ Raises
360
+ ------
361
+ NotFittedError
362
+ If :meth:`fit` has not been called before.
363
+ """
364
+
365
+ check_is_fitted(self)
366
+ X = self._validate_data(X, reset=False)
367
+
368
+ return np.dot(X, self.components_.T)
369
+
370
+ def _initialize(self, X, y, init):
371
+ """Initialize the transformation.
372
+
373
+ Parameters
374
+ ----------
375
+ X : array-like of shape (n_samples, n_features)
376
+ The training samples.
377
+
378
+ y : array-like of shape (n_samples,)
379
+ The training labels.
380
+
381
+ init : str or ndarray of shape (n_features_a, n_features_b)
382
+ The validated initialization of the linear transformation.
383
+
384
+ Returns
385
+ -------
386
+ transformation : ndarray of shape (n_components, n_features)
387
+ The initialized linear transformation.
388
+
389
+ """
390
+
391
+ transformation = init
392
+ if self.warm_start and hasattr(self, "components_"):
393
+ transformation = self.components_
394
+ elif isinstance(init, np.ndarray):
395
+ pass
396
+ else:
397
+ n_samples, n_features = X.shape
398
+ n_components = self.n_components or n_features
399
+ if init == "auto":
400
+ n_classes = len(np.unique(y))
401
+ if n_components <= min(n_features, n_classes - 1):
402
+ init = "lda"
403
+ elif n_components < min(n_features, n_samples):
404
+ init = "pca"
405
+ else:
406
+ init = "identity"
407
+ if init == "identity":
408
+ transformation = np.eye(n_components, X.shape[1])
409
+ elif init == "random":
410
+ transformation = self.random_state_.standard_normal(
411
+ size=(n_components, X.shape[1])
412
+ )
413
+ elif init in {"pca", "lda"}:
414
+ init_time = time.time()
415
+ if init == "pca":
416
+ pca = PCA(
417
+ n_components=n_components, random_state=self.random_state_
418
+ )
419
+ if self.verbose:
420
+ print("Finding principal components... ", end="")
421
+ sys.stdout.flush()
422
+ pca.fit(X)
423
+ transformation = pca.components_
424
+ elif init == "lda":
425
+ from ..discriminant_analysis import LinearDiscriminantAnalysis
426
+
427
+ lda = LinearDiscriminantAnalysis(n_components=n_components)
428
+ if self.verbose:
429
+ print("Finding most discriminative components... ", end="")
430
+ sys.stdout.flush()
431
+ lda.fit(X, y)
432
+ transformation = lda.scalings_.T[:n_components]
433
+ if self.verbose:
434
+ print("done in {:5.2f}s".format(time.time() - init_time))
435
+ return transformation
436
+
437
+ def _callback(self, transformation):
438
+ """Called after each iteration of the optimizer.
439
+
440
+ Parameters
441
+ ----------
442
+ transformation : ndarray of shape (n_components * n_features,)
443
+ The solution computed by the optimizer in this iteration.
444
+ """
445
+ if self.callback is not None:
446
+ self.callback(transformation, self.n_iter_)
447
+
448
+ self.n_iter_ += 1
449
+
450
+ def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
451
+ """Compute the loss and the loss gradient w.r.t. `transformation`.
452
+
453
+ Parameters
454
+ ----------
455
+ transformation : ndarray of shape (n_components * n_features,)
456
+ The raveled linear transformation on which to compute loss and
457
+ evaluate gradient.
458
+
459
+ X : ndarray of shape (n_samples, n_features)
460
+ The training samples.
461
+
462
+ same_class_mask : ndarray of shape (n_samples, n_samples)
463
+ A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong
464
+ to the same class, and `0` otherwise.
465
+
466
+ Returns
467
+ -------
468
+ loss : float
469
+ The loss computed for the given transformation.
470
+
471
+ gradient : ndarray of shape (n_components * n_features,)
472
+ The new (flattened) gradient of the loss.
473
+ """
474
+
475
+ if self.n_iter_ == 0:
476
+ self.n_iter_ += 1
477
+ if self.verbose:
478
+ header_fields = ["Iteration", "Objective Value", "Time(s)"]
479
+ header_fmt = "{:>10} {:>20} {:>10}"
480
+ header = header_fmt.format(*header_fields)
481
+ cls_name = self.__class__.__name__
482
+ print("[{}]".format(cls_name))
483
+ print(
484
+ "[{}] {}\n[{}] {}".format(
485
+ cls_name, header, cls_name, "-" * len(header)
486
+ )
487
+ )
488
+
489
+ t_funcall = time.time()
490
+
491
+ transformation = transformation.reshape(-1, X.shape[1])
492
+ X_embedded = np.dot(X, transformation.T) # (n_samples, n_components)
493
+
494
+ # Compute softmax distances
495
+ p_ij = pairwise_distances(X_embedded, squared=True)
496
+ np.fill_diagonal(p_ij, np.inf)
497
+ p_ij = softmax(-p_ij) # (n_samples, n_samples)
498
+
499
+ # Compute loss
500
+ masked_p_ij = p_ij * same_class_mask
501
+ p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1)
502
+ loss = np.sum(p)
503
+
504
+ # Compute gradient of loss w.r.t. `transformation`
505
+ weighted_p_ij = masked_p_ij - p_ij * p
506
+ weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
507
+ np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
508
+ gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
509
+ # time complexity of the gradient: O(n_components x n_samples x (
510
+ # n_samples + n_features))
511
+
512
+ if self.verbose:
513
+ t_funcall = time.time() - t_funcall
514
+ values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}"
515
+ print(
516
+ values_fmt.format(
517
+ self.__class__.__name__, self.n_iter_, loss, t_funcall
518
+ )
519
+ )
520
+ sys.stdout.flush()
521
+
522
+ return sign * loss, sign * gradient.ravel()
523
+
524
+ def _more_tags(self):
525
+ return {"requires_y": True}
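For intuition, here is a minimal NumPy sketch (a toy illustration, not the estimator's actual code path) of the softmax-distance probabilities and objective that `_loss_grad_lbfgs` above computes for a candidate linear transformation:

import numpy as np

# Toy data: 4 points in 2-D, two classes.
X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
y = np.array([0, 0, 1, 1])
A = np.eye(2)  # candidate linear transformation (identity here)

X_emb = X @ A.T
# Squared pairwise distances in the transformed space.
d2 = ((X_emb[:, None, :] - X_emb[None, :, :]) ** 2).sum(axis=-1)
np.fill_diagonal(d2, np.inf)             # a point never picks itself as a neighbor
p_ij = np.exp(-d2)
p_ij /= p_ij.sum(axis=1, keepdims=True)  # row-wise softmax over -d2
same_class = y[:, None] == y[None, :]
p_i = (p_ij * same_class).sum(axis=1)    # probability of correct stochastic classification
loss = p_i.sum()                         # NCA maximizes this (the optimizer minimizes -loss)
print(round(loss, 3))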
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_nearest_centroid.py ADDED
@@ -0,0 +1,261 @@
1
+ """
2
+ Nearest Centroid Classification
3
+ """
4
+
5
+ # Author: Robert Layton <[email protected]>
6
+ # Olivier Grisel <[email protected]>
7
+ #
8
+ # License: BSD 3 clause
9
+
10
+ import warnings
11
+ from numbers import Real
12
+
13
+ import numpy as np
14
+ from scipy import sparse as sp
15
+
16
+ from sklearn.metrics.pairwise import _VALID_METRICS
17
+
18
+ from ..base import BaseEstimator, ClassifierMixin, _fit_context
19
+ from ..metrics.pairwise import pairwise_distances_argmin
20
+ from ..preprocessing import LabelEncoder
21
+ from ..utils._param_validation import Interval, StrOptions
22
+ from ..utils.multiclass import check_classification_targets
23
+ from ..utils.sparsefuncs import csc_median_axis_0
24
+ from ..utils.validation import check_is_fitted
25
+
26
+
27
+ class NearestCentroid(ClassifierMixin, BaseEstimator):
28
+ """Nearest centroid classifier.
29
+
30
+ Each class is represented by its centroid, with test samples classified to
31
+ the class with the nearest centroid.
32
+
33
+ Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
34
+
35
+ Parameters
36
+ ----------
37
+ metric : str or callable, default="euclidean"
38
+ Metric to use for distance computation. See the documentation of
39
+ `scipy.spatial.distance
40
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
41
+ the metrics listed in
42
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
43
+ values. Note that "wminkowski", "seuclidean" and "mahalanobis" are not
44
+ supported.
45
+
46
+ The centroid for the samples corresponding to each class is
47
+ the point from which the sum of the distances (according to the metric)
48
+ of all samples that belong to that particular class is minimized.
49
+ If the `"manhattan"` metric is provided, this centroid is the median;
50
+ for all other metrics, the centroid is set to be the mean.
51
+
52
+ .. deprecated:: 1.3
53
+ Support for metrics other than `euclidean` and `manhattan` and for
54
+ callables was deprecated in version 1.3 and will be removed in
55
+ version 1.5.
56
+
57
+ .. versionchanged:: 0.19
58
+ `metric='precomputed'` was deprecated and now raises an error
59
+
60
+ shrink_threshold : float, default=None
61
+ Threshold for shrinking centroids to remove features.
62
+
63
+ Attributes
64
+ ----------
65
+ centroids_ : array-like of shape (n_classes, n_features)
66
+ Centroid of each class.
67
+
68
+ classes_ : array of shape (n_classes,)
69
+ The unique classes labels.
70
+
71
+ n_features_in_ : int
72
+ Number of features seen during :term:`fit`.
73
+
74
+ .. versionadded:: 0.24
75
+
76
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
77
+ Names of features seen during :term:`fit`. Defined only when `X`
78
+ has feature names that are all strings.
79
+
80
+ .. versionadded:: 1.0
81
+
82
+ See Also
83
+ --------
84
+ KNeighborsClassifier : Nearest neighbors classifier.
85
+
86
+ Notes
87
+ -----
88
+ When used for text classification with tf-idf vectors, this classifier is
89
+ also known as the Rocchio classifier.
90
+
91
+ References
92
+ ----------
93
+ Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
94
+ multiple cancer types by shrunken centroids of gene expression. Proceedings
95
+ of the National Academy of Sciences of the United States of America,
96
+ 99(10), 6567-6572. The National Academy of Sciences.
97
+
98
+ Examples
99
+ --------
100
+ >>> from sklearn.neighbors import NearestCentroid
101
+ >>> import numpy as np
102
+ >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
103
+ >>> y = np.array([1, 1, 1, 2, 2, 2])
104
+ >>> clf = NearestCentroid()
105
+ >>> clf.fit(X, y)
106
+ NearestCentroid()
107
+ >>> print(clf.predict([[-0.8, -1]]))
108
+ [1]
109
+ """
110
+
111
+ _valid_metrics = set(_VALID_METRICS) - {"mahalanobis", "seuclidean", "wminkowski"}
112
+
113
+ _parameter_constraints: dict = {
114
+ "metric": [
115
+ StrOptions(
116
+ _valid_metrics, deprecated=_valid_metrics - {"manhattan", "euclidean"}
117
+ ),
118
+ callable,
119
+ ],
120
+ "shrink_threshold": [Interval(Real, 0, None, closed="neither"), None],
121
+ }
122
+
123
+ def __init__(self, metric="euclidean", *, shrink_threshold=None):
124
+ self.metric = metric
125
+ self.shrink_threshold = shrink_threshold
126
+
127
+ @_fit_context(prefer_skip_nested_validation=True)
128
+ def fit(self, X, y):
129
+ """
130
+ Fit the NearestCentroid model according to the given training data.
131
+
132
+ Parameters
133
+ ----------
134
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
135
+ Training vector, where `n_samples` is the number of samples and
136
+ `n_features` is the number of features.
137
+ Note that centroid shrinking cannot be used with sparse matrices.
138
+ y : array-like of shape (n_samples,)
139
+ Target values.
140
+
141
+ Returns
142
+ -------
143
+ self : object
144
+ Fitted estimator.
145
+ """
146
+ if isinstance(self.metric, str) and self.metric not in (
147
+ "manhattan",
148
+ "euclidean",
149
+ ):
150
+ warnings.warn(
151
+ (
152
+ "Support for distance metrics other than euclidean and "
153
+ "manhattan and for callables was deprecated in version "
154
+ "1.3 and will be removed in version 1.5."
155
+ ),
156
+ FutureWarning,
157
+ )
158
+
159
+ # If X is sparse and the metric is "manhattan", store it in csc
160
+ # format, which makes it easier to calculate the median.
161
+ if self.metric == "manhattan":
162
+ X, y = self._validate_data(X, y, accept_sparse=["csc"])
163
+ else:
164
+ X, y = self._validate_data(X, y, accept_sparse=["csr", "csc"])
165
+ is_X_sparse = sp.issparse(X)
166
+ if is_X_sparse and self.shrink_threshold:
167
+ raise ValueError("threshold shrinking not supported for sparse input")
168
+ check_classification_targets(y)
169
+
170
+ n_samples, n_features = X.shape
171
+ le = LabelEncoder()
172
+ y_ind = le.fit_transform(y)
173
+ self.classes_ = classes = le.classes_
174
+ n_classes = classes.size
175
+ if n_classes < 2:
176
+ raise ValueError(
177
+ "The number of classes has to be greater than one; got %d class"
178
+ % (n_classes)
179
+ )
180
+
181
+ # Mask mapping each class to its members.
182
+ self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
183
+ # Number of samples in each class.
184
+ nk = np.zeros(n_classes)
185
+
186
+ for cur_class in range(n_classes):
187
+ center_mask = y_ind == cur_class
188
+ nk[cur_class] = np.sum(center_mask)
189
+ if is_X_sparse:
190
+ center_mask = np.where(center_mask)[0]
191
+
192
+ if self.metric == "manhattan":
193
+ # NumPy does not calculate median of sparse matrices.
194
+ if not is_X_sparse:
195
+ self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
196
+ else:
197
+ self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
198
+ else:
199
+ # TODO(1.5) remove warning when metric is only manhattan or euclidean
200
+ if self.metric != "euclidean":
201
+ warnings.warn(
202
+ "Averaging for metrics other than "
203
+ "euclidean and manhattan is not supported. "
204
+ "The average is set to be the mean."
205
+ )
206
+ self.centroids_[cur_class] = X[center_mask].mean(axis=0)
207
+
208
+ if self.shrink_threshold:
209
+ if np.all(np.ptp(X, axis=0) == 0):
210
+ raise ValueError("All features have zero variance. Division by zero.")
211
+ dataset_centroid_ = np.mean(X, axis=0)
212
+
213
+ # m parameter for determining deviation
214
+ m = np.sqrt((1.0 / nk) - (1.0 / n_samples))
215
+ # Calculate deviation using the standard deviation of centroids.
216
+ variance = (X - self.centroids_[y_ind]) ** 2
217
+ variance = variance.sum(axis=0)
218
+ s = np.sqrt(variance / (n_samples - n_classes))
219
+ s += np.median(s) # To deter outliers from affecting the results.
220
+ mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
221
+ ms = mm * s
222
+ deviation = (self.centroids_ - dataset_centroid_) / ms
223
+ # Soft thresholding: if the deviation crosses 0 during shrinking,
224
+ # it becomes zero.
225
+ signs = np.sign(deviation)
226
+ deviation = np.abs(deviation) - self.shrink_threshold
227
+ np.clip(deviation, 0, None, out=deviation)
228
+ deviation *= signs
229
+ # Now adjust the centroids using the deviation
230
+ msd = ms * deviation
231
+ self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
232
+ return self
233
+
234
+ # TODO(1.5) remove note about precomputed metric
235
+ def predict(self, X):
236
+ """Perform classification on an array of test vectors `X`.
237
+
238
+ The predicted class `C` for each sample in `X` is returned.
239
+
240
+ Parameters
241
+ ----------
242
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
243
+ Test samples.
244
+
245
+ Returns
246
+ -------
247
+ C : ndarray of shape (n_samples,)
248
+ The predicted classes.
249
+
250
+ Notes
251
+ -----
252
+ If the metric constructor parameter is `"precomputed"`, `X` is assumed
253
+ to be the distance matrix between the data to be predicted and
254
+ `self.centroids_`.
255
+ """
256
+ check_is_fitted(self)
257
+
258
+ X = self._validate_data(X, accept_sparse="csr", reset=False)
259
+ return self.classes_[
260
+ pairwise_distances_argmin(X, self.centroids_, metric=self.metric)
261
+ ]
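As a rough standalone illustration (not the fitted estimator's code), the soft-thresholding step applied to the class-centroid deviations when `shrink_threshold` is set behaves like this:

import numpy as np

def soft_threshold(deviation, threshold):
    # Shrink deviations toward zero; any value that crosses zero becomes exactly zero.
    signs = np.sign(deviation)
    shrunk = np.abs(deviation) - threshold
    return signs * np.clip(shrunk, 0, None)

deviation = np.array([[2.5, -0.3], [-1.2, 0.1]])
print(soft_threshold(deviation, 0.5))
# Large deviations shrink by 0.5; small ones (|d| <= 0.5) are zeroed,
# which removes the corresponding features from those class centroids.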
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (45.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pxd ADDED
@@ -0,0 +1,10 @@
1
+ from cython cimport floating
2
+ from ..utils._typedefs cimport float64_t, intp_t
3
+
4
+ cdef int partition_node_indices(
5
+ const floating *data,
6
+ intp_t *node_indices,
7
+ intp_t split_dim,
8
+ intp_t split_index,
9
+ intp_t n_features,
10
+ intp_t n_points) except -1
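Conceptually, `partition_node_indices` reorders a node's index array so that the `split_index` points with the smallest coordinates along `split_dim` come first, similar to a partial sort. A NumPy analogy (illustrative only, not the Cython routine):

import numpy as np

data = np.array([[3.0, 1.0], [1.0, 2.0], [4.0, 0.0], [2.0, 5.0]])
node_indices = np.arange(len(data))
split_dim, split_index = 0, 2

# Reorder indices so the split_index smallest values along split_dim come first.
order = np.argpartition(data[node_indices, split_dim], split_index)
node_indices = node_indices[order]
print(node_indices)  # the first two indices point at the two smallest x-values (1.0 and 2.0)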
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (315 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd ADDED
@@ -0,0 +1,92 @@
1
+ # Author: Thomas Moreau <[email protected]>
2
+ # Author: Olivier Grisel <[email protected]>
3
+
4
+ # See quad_tree.pyx for details.
5
+
6
+ cimport numpy as cnp
7
+ from ..utils._typedefs cimport float32_t, intp_t
8
+
9
+ # This is effectively an ifdef statement in Cython
10
+ # It allows us to write printf debugging lines
11
+ # and remove them at compile time
12
+ cdef enum:
13
+ DEBUGFLAG = 0
14
+
15
+ cdef float EPSILON = 1e-6
16
+
17
+ # XXX: Be careful not to change the order of the arguments. It is important to
18
+ # keep is_leaf and max_width consecutive, as this avoids padding by
19
+ # the compiler and keeps the size consistent for both C and numpy data structures.
20
+ cdef struct Cell:
21
+ # Base storage structure for cells in a QuadTree object
22
+
23
+ # Tree structure
24
+ intp_t parent # Parent cell of this cell
25
+ intp_t[8] children # Array pointing to children of this cell
26
+
27
+ # Cell description
28
+ intp_t cell_id # Id of the cell in the cells array in the Tree
29
+ intp_t point_index # Index of the point at this cell (only defined
30
+ # # in non empty leaf)
31
+ bint is_leaf # Does this cell have children?
32
+ float32_t squared_max_width # Squared value of the maximum width w
33
+ intp_t depth # Depth of the cell in the tree
34
+ intp_t cumulative_size # Number of points included in the subtree with
35
+ # # this cell as a root.
36
+
37
+ # Internal constants
38
+ float32_t[3] center # Store the center for quick split of cells
39
+ float32_t[3] barycenter # Keep track of the center of mass of the cell
40
+
41
+ # Cell boundaries
42
+ float32_t[3] min_bounds # Inferior boundaries of this cell (inclusive)
43
+ float32_t[3] max_bounds # Superior boundaries of this cell (exclusive)
44
+
45
+
46
+ cdef class _QuadTree:
47
+ # The QuadTree object is a quad tree structure constructed by inserting
48
+ # recursively points in the tree and splitting cells in 4 so that each
49
+ # leaf cell contains at most one point.
50
+ # This structure also handles 3D data, inserted in trees with 8 children
51
+ # for each node.
52
+
53
+ # Parameters of the tree
54
+ cdef public int n_dimensions # Number of dimensions in X
55
+ cdef public int verbose # Verbosity of the output
56
+ cdef intp_t n_cells_per_cell # Number of children per node. (2 ** n_dimension)
57
+
58
+ # Tree inner structure
59
+ cdef public intp_t max_depth # Max depth of the tree
60
+ cdef public intp_t cell_count # Counter for node IDs
61
+ cdef public intp_t capacity # Capacity of tree, in terms of nodes
62
+ cdef public intp_t n_points # Total number of points
63
+ cdef Cell* cells # Array of nodes
64
+
65
+ # Point insertion methods
66
+ cdef int insert_point(self, float32_t[3] point, intp_t point_index,
67
+ intp_t cell_id=*) except -1 nogil
68
+ cdef intp_t _insert_point_in_new_child(self, float32_t[3] point, Cell* cell,
69
+ intp_t point_index, intp_t size=*
70
+ ) noexcept nogil
71
+ cdef intp_t _select_child(self, float32_t[3] point, Cell* cell) noexcept nogil
72
+ cdef bint _is_duplicate(self, float32_t[3] point1, float32_t[3] point2) noexcept nogil
73
+
74
+ # Create a summary of the Tree compare to a query point
75
+ cdef long summarize(self, float32_t[3] point, float32_t* results,
76
+ float squared_theta=*, intp_t cell_id=*, long idx=*
77
+ ) noexcept nogil
78
+
79
+ # Internal cell initialization methods
80
+ cdef void _init_cell(self, Cell* cell, intp_t parent, intp_t depth) noexcept nogil
81
+ cdef void _init_root(self, float32_t[3] min_bounds, float32_t[3] max_bounds
82
+ ) noexcept nogil
83
+
84
+ # Private methods
85
+ cdef int _check_point_in_cell(self, float32_t[3] point, Cell* cell
86
+ ) except -1 nogil
87
+
88
+ # Private array manipulation to manage the ``cells`` array
89
+ cdef int _resize(self, intp_t capacity) except -1 nogil
90
+ cdef int _resize_c(self, intp_t capacity=*) except -1 nogil
91
+ cdef int _get_cell(self, float32_t[3] point, intp_t cell_id=*) except -1 nogil
92
+ cdef Cell[:] _get_cell_ndarray(self)
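A toy sketch (illustrative only, not the Cython implementation) of how a point is routed to one of the 2**n_dimensions children of a cell, which is the role of `_select_child`:

import numpy as np

center = np.array([0.5, 0.5])  # center of the current 2-D cell
point = np.array([0.7, 0.2])

# The child index encodes, one bit per dimension, whether the point lies
# at or above the cell center along that dimension (4 children in 2-D, 8 in 3-D).
child_id = sum((1 << d) for d in range(len(center)) if point[d] >= center[d])
print(child_id)  # 1: right of the center in x, below it in y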
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_regression.py ADDED
@@ -0,0 +1,510 @@
1
+ """Nearest Neighbor Regression."""
2
+
3
+ # Authors: Jake Vanderplas <[email protected]>
4
+ # Fabian Pedregosa <[email protected]>
5
+ # Alexandre Gramfort <[email protected]>
6
+ # Sparseness support by Lars Buitinck
7
+ # Multi-output support by Arnaud Joly <[email protected]>
8
+ # Empty radius support by Andreas Bjerre-Nielsen
9
+ #
10
+ # License: BSD 3 clause (C) INRIA, University of Amsterdam,
11
+ # University of Copenhagen
12
+
13
+ import warnings
14
+
15
+ import numpy as np
16
+
17
+ from ..base import RegressorMixin, _fit_context
18
+ from ..metrics import DistanceMetric
19
+ from ..utils._param_validation import StrOptions
20
+ from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin, _get_weights
21
+
22
+
23
+ class KNeighborsRegressor(KNeighborsMixin, RegressorMixin, NeighborsBase):
24
+ """Regression based on k-nearest neighbors.
25
+
26
+ The target is predicted by local interpolation of the targets
27
+ associated with the nearest neighbors in the training set.
28
+
29
+ Read more in the :ref:`User Guide <regression>`.
30
+
31
+ .. versionadded:: 0.9
32
+
33
+ Parameters
34
+ ----------
35
+ n_neighbors : int, default=5
36
+ Number of neighbors to use by default for :meth:`kneighbors` queries.
37
+
38
+ weights : {'uniform', 'distance'}, callable or None, default='uniform'
39
+ Weight function used in prediction. Possible values:
40
+
41
+ - 'uniform' : uniform weights. All points in each neighborhood
42
+ are weighted equally.
43
+ - 'distance' : weight points by the inverse of their distance.
44
+ in this case, closer neighbors of a query point will have a
45
+ greater influence than neighbors which are further away.
46
+ - [callable] : a user-defined function which accepts an
47
+ array of distances, and returns an array of the same shape
48
+ containing the weights.
49
+
50
+ Uniform weights are used by default.
51
+
52
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
53
+ Algorithm used to compute the nearest neighbors:
54
+
55
+ - 'ball_tree' will use :class:`BallTree`
56
+ - 'kd_tree' will use :class:`KDTree`
57
+ - 'brute' will use a brute-force search.
58
+ - 'auto' will attempt to decide the most appropriate algorithm
59
+ based on the values passed to :meth:`fit` method.
60
+
61
+ Note: fitting on sparse input will override the setting of
62
+ this parameter, using brute force.
63
+
64
+ leaf_size : int, default=30
65
+ Leaf size passed to BallTree or KDTree. This can affect the
66
+ speed of the construction and query, as well as the memory
67
+ required to store the tree. The optimal value depends on the
68
+ nature of the problem.
69
+
70
+ p : float, default=2
71
+ Power parameter for the Minkowski metric. When p = 1, this is
72
+ equivalent to using manhattan_distance (l1), and euclidean_distance
73
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
74
+
75
+ metric : str, DistanceMetric object or callable, default='minkowski'
76
+ Metric to use for distance computation. Default is "minkowski", which
77
+ results in the standard Euclidean distance when p = 2. See the
78
+ documentation of `scipy.spatial.distance
79
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
80
+ the metrics listed in
81
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
82
+ values.
83
+
84
+ If metric is "precomputed", X is assumed to be a distance matrix and
85
+ must be square during fit. X may be a :term:`sparse graph`, in which
86
+ case only "nonzero" elements may be considered neighbors.
87
+
88
+ If metric is a callable function, it takes two arrays representing 1D
89
+ vectors as inputs and must return one value indicating the distance
90
+ between those vectors. This works for Scipy's metrics, but is less
91
+ efficient than passing the metric name as a string.
92
+
93
+ If metric is a DistanceMetric object, it will be passed directly to
94
+ the underlying computation routines.
95
+
96
+ metric_params : dict, default=None
97
+ Additional keyword arguments for the metric function.
98
+
99
+ n_jobs : int, default=None
100
+ The number of parallel jobs to run for neighbors search.
101
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
102
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
103
+ for more details.
104
+ Doesn't affect :meth:`fit` method.
105
+
106
+ Attributes
107
+ ----------
108
+ effective_metric_ : str or callable
109
+ The distance metric to use. It will be same as the `metric` parameter
110
+ or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
111
+ 'minkowski' and `p` parameter set to 2.
112
+
113
+ effective_metric_params_ : dict
114
+ Additional keyword arguments for the metric function. For most metrics
115
+ will be same with `metric_params` parameter, but may also contain the
116
+ `p` parameter value if the `effective_metric_` attribute is set to
117
+ 'minkowski'.
118
+
119
+ n_features_in_ : int
120
+ Number of features seen during :term:`fit`.
121
+
122
+ .. versionadded:: 0.24
123
+
124
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
125
+ Names of features seen during :term:`fit`. Defined only when `X`
126
+ has feature names that are all strings.
127
+
128
+ .. versionadded:: 1.0
129
+
130
+ n_samples_fit_ : int
131
+ Number of samples in the fitted data.
132
+
133
+ See Also
134
+ --------
135
+ NearestNeighbors : Unsupervised learner for implementing neighbor searches.
136
+ RadiusNeighborsRegressor : Regression based on neighbors within a fixed radius.
137
+ KNeighborsClassifier : Classifier implementing the k-nearest neighbors vote.
138
+ RadiusNeighborsClassifier : Classifier implementing
139
+ a vote among neighbors within a given radius.
140
+
141
+ Notes
142
+ -----
143
+ See :ref:`Nearest Neighbors <neighbors>` in the online documentation
144
+ for a discussion of the choice of ``algorithm`` and ``leaf_size``.
145
+
146
+ .. warning::
147
+
148
+ Regarding the Nearest Neighbors algorithms, if it is found that two
149
+ neighbors, neighbor `k+1` and `k`, have identical distances but
150
+ different labels, the results will depend on the ordering of the
151
+ training data.
152
+
153
+ https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
154
+
155
+ Examples
156
+ --------
157
+ >>> X = [[0], [1], [2], [3]]
158
+ >>> y = [0, 0, 1, 1]
159
+ >>> from sklearn.neighbors import KNeighborsRegressor
160
+ >>> neigh = KNeighborsRegressor(n_neighbors=2)
161
+ >>> neigh.fit(X, y)
162
+ KNeighborsRegressor(...)
163
+ >>> print(neigh.predict([[1.5]]))
164
+ [0.5]
165
+ """
166
+
167
+ _parameter_constraints: dict = {
168
+ **NeighborsBase._parameter_constraints,
169
+ "weights": [StrOptions({"uniform", "distance"}), callable, None],
170
+ }
171
+ _parameter_constraints["metric"].append(DistanceMetric)
172
+ _parameter_constraints.pop("radius")
173
+
174
+ def __init__(
175
+ self,
176
+ n_neighbors=5,
177
+ *,
178
+ weights="uniform",
179
+ algorithm="auto",
180
+ leaf_size=30,
181
+ p=2,
182
+ metric="minkowski",
183
+ metric_params=None,
184
+ n_jobs=None,
185
+ ):
186
+ super().__init__(
187
+ n_neighbors=n_neighbors,
188
+ algorithm=algorithm,
189
+ leaf_size=leaf_size,
190
+ metric=metric,
191
+ p=p,
192
+ metric_params=metric_params,
193
+ n_jobs=n_jobs,
194
+ )
195
+ self.weights = weights
196
+
197
+ def _more_tags(self):
198
+ # For cross-validation routines to split data correctly
199
+ return {"pairwise": self.metric == "precomputed"}
200
+
201
+ @_fit_context(
202
+ # KNeighborsRegressor.metric is not validated yet
203
+ prefer_skip_nested_validation=False
204
+ )
205
+ def fit(self, X, y):
206
+ """Fit the k-nearest neighbors regressor from the training dataset.
207
+
208
+ Parameters
209
+ ----------
210
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
211
+ (n_samples, n_samples) if metric='precomputed'
212
+ Training data.
213
+
214
+ y : {array-like, sparse matrix} of shape (n_samples,) or \
215
+ (n_samples, n_outputs)
216
+ Target values.
217
+
218
+ Returns
219
+ -------
220
+ self : KNeighborsRegressor
221
+ The fitted k-nearest neighbors regressor.
222
+ """
223
+ return self._fit(X, y)
224
+
225
+ def predict(self, X):
226
+ """Predict the target for the provided data.
227
+
228
+ Parameters
229
+ ----------
230
+ X : {array-like, sparse matrix} of shape (n_queries, n_features), \
231
+ or (n_queries, n_indexed) if metric == 'precomputed'
232
+ Test samples.
233
+
234
+ Returns
235
+ -------
236
+ y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double
237
+ Target values.
238
+ """
239
+ if self.weights == "uniform":
240
+ # In that case, we do not need the distances to perform
241
+ # the weighting so we do not compute them.
242
+ neigh_ind = self.kneighbors(X, return_distance=False)
243
+ neigh_dist = None
244
+ else:
245
+ neigh_dist, neigh_ind = self.kneighbors(X)
246
+
247
+ weights = _get_weights(neigh_dist, self.weights)
248
+
249
+ _y = self._y
250
+ if _y.ndim == 1:
251
+ _y = _y.reshape((-1, 1))
252
+
253
+ if weights is None:
254
+ y_pred = np.mean(_y[neigh_ind], axis=1)
255
+ else:
256
+ y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64)
257
+ denom = np.sum(weights, axis=1)
258
+
259
+ for j in range(_y.shape[1]):
260
+ num = np.sum(_y[neigh_ind, j] * weights, axis=1)
261
+ y_pred[:, j] = num / denom
262
+
263
+ if self._y.ndim == 1:
264
+ y_pred = y_pred.ravel()
265
+
266
+ return y_pred
267
+
268
+
269
+ class RadiusNeighborsRegressor(RadiusNeighborsMixin, RegressorMixin, NeighborsBase):
270
+ """Regression based on neighbors within a fixed radius.
271
+
272
+ The target is predicted by local interpolation of the targets
273
+ associated with the nearest neighbors in the training set.
274
+
275
+ Read more in the :ref:`User Guide <regression>`.
276
+
277
+ .. versionadded:: 0.9
278
+
279
+ Parameters
280
+ ----------
281
+ radius : float, default=1.0
282
+ Range of parameter space to use by default for :meth:`radius_neighbors`
283
+ queries.
284
+
285
+ weights : {'uniform', 'distance'}, callable or None, default='uniform'
286
+ Weight function used in prediction. Possible values:
287
+
288
+ - 'uniform' : uniform weights. All points in each neighborhood
289
+ are weighted equally.
290
+ - 'distance' : weight points by the inverse of their distance.
291
+ in this case, closer neighbors of a query point will have a
292
+ greater influence than neighbors which are further away.
293
+ - [callable] : a user-defined function which accepts an
294
+ array of distances, and returns an array of the same shape
295
+ containing the weights.
296
+
297
+ Uniform weights are used by default.
298
+
299
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
300
+ Algorithm used to compute the nearest neighbors:
301
+
302
+ - 'ball_tree' will use :class:`BallTree`
303
+ - 'kd_tree' will use :class:`KDTree`
304
+ - 'brute' will use a brute-force search.
305
+ - 'auto' will attempt to decide the most appropriate algorithm
306
+ based on the values passed to :meth:`fit` method.
307
+
308
+ Note: fitting on sparse input will override the setting of
309
+ this parameter, using brute force.
310
+
311
+ leaf_size : int, default=30
312
+ Leaf size passed to BallTree or KDTree. This can affect the
313
+ speed of the construction and query, as well as the memory
314
+ required to store the tree. The optimal value depends on the
315
+ nature of the problem.
316
+
317
+ p : float, default=2
318
+ Power parameter for the Minkowski metric. When p = 1, this is
319
+ equivalent to using manhattan_distance (l1), and euclidean_distance
320
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
321
+
322
+ metric : str or callable, default='minkowski'
323
+ Metric to use for distance computation. Default is "minkowski", which
324
+ results in the standard Euclidean distance when p = 2. See the
325
+ documentation of `scipy.spatial.distance
326
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
327
+ the metrics listed in
328
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
329
+ values.
330
+
331
+ If metric is "precomputed", X is assumed to be a distance matrix and
332
+ must be square during fit. X may be a :term:`sparse graph`, in which
333
+ case only "nonzero" elements may be considered neighbors.
334
+
335
+ If metric is a callable function, it takes two arrays representing 1D
336
+ vectors as inputs and must return one value indicating the distance
337
+ between those vectors. This works for Scipy's metrics, but is less
338
+ efficient than passing the metric name as a string.
339
+
340
+ metric_params : dict, default=None
341
+ Additional keyword arguments for the metric function.
342
+
343
+ n_jobs : int, default=None
344
+ The number of parallel jobs to run for neighbors search.
345
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
346
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
347
+ for more details.
348
+
349
+ Attributes
350
+ ----------
351
+ effective_metric_ : str or callable
352
+ The distance metric to use. It will be same as the `metric` parameter
353
+ or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
354
+ 'minkowski' and `p` parameter set to 2.
355
+
356
+ effective_metric_params_ : dict
357
+ Additional keyword arguments for the metric function. For most metrics
358
+ will be same with `metric_params` parameter, but may also contain the
359
+ `p` parameter value if the `effective_metric_` attribute is set to
360
+ 'minkowski'.
361
+
362
+ n_features_in_ : int
363
+ Number of features seen during :term:`fit`.
364
+
365
+ .. versionadded:: 0.24
366
+
367
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
368
+ Names of features seen during :term:`fit`. Defined only when `X`
369
+ has feature names that are all strings.
370
+
371
+ .. versionadded:: 1.0
372
+
373
+ n_samples_fit_ : int
374
+ Number of samples in the fitted data.
375
+
376
+ See Also
377
+ --------
378
+ NearestNeighbors : Unsupervised learner for implementing neighbor searches.
379
+ KNeighborsRegressor : Regression based on k-nearest neighbors.
380
+ KNeighborsClassifier : Classifier based on the k-nearest neighbors.
381
+ RadiusNeighborsClassifier : Classifier based on neighbors within a given radius.
382
+
383
+ Notes
384
+ -----
385
+ See :ref:`Nearest Neighbors <neighbors>` in the online documentation
386
+ for a discussion of the choice of ``algorithm`` and ``leaf_size``.
387
+
388
+ https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
389
+
390
+ Examples
391
+ --------
392
+ >>> X = [[0], [1], [2], [3]]
393
+ >>> y = [0, 0, 1, 1]
394
+ >>> from sklearn.neighbors import RadiusNeighborsRegressor
395
+ >>> neigh = RadiusNeighborsRegressor(radius=1.0)
396
+ >>> neigh.fit(X, y)
397
+ RadiusNeighborsRegressor(...)
398
+ >>> print(neigh.predict([[1.5]]))
399
+ [0.5]
400
+ """
401
+
402
+ _parameter_constraints: dict = {
403
+ **NeighborsBase._parameter_constraints,
404
+ "weights": [StrOptions({"uniform", "distance"}), callable, None],
405
+ }
406
+ _parameter_constraints.pop("n_neighbors")
407
+
408
+ def __init__(
409
+ self,
410
+ radius=1.0,
411
+ *,
412
+ weights="uniform",
413
+ algorithm="auto",
414
+ leaf_size=30,
415
+ p=2,
416
+ metric="minkowski",
417
+ metric_params=None,
418
+ n_jobs=None,
419
+ ):
420
+ super().__init__(
421
+ radius=radius,
422
+ algorithm=algorithm,
423
+ leaf_size=leaf_size,
424
+ p=p,
425
+ metric=metric,
426
+ metric_params=metric_params,
427
+ n_jobs=n_jobs,
428
+ )
429
+ self.weights = weights
430
+
431
+ @_fit_context(
432
+ # RadiusNeighborsRegressor.metric is not validated yet
433
+ prefer_skip_nested_validation=False
434
+ )
435
+ def fit(self, X, y):
436
+ """Fit the radius neighbors regressor from the training dataset.
437
+
438
+ Parameters
439
+ ----------
440
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
441
+ (n_samples, n_samples) if metric='precomputed'
442
+ Training data.
443
+
444
+ y : {array-like, sparse matrix} of shape (n_samples,) or \
445
+ (n_samples, n_outputs)
446
+ Target values.
447
+
448
+ Returns
449
+ -------
450
+ self : RadiusNeighborsRegressor
451
+ The fitted radius neighbors regressor.
452
+ """
453
+ return self._fit(X, y)
454
+
455
+ def predict(self, X):
456
+ """Predict the target for the provided data.
457
+
458
+ Parameters
459
+ ----------
460
+ X : {array-like, sparse matrix} of shape (n_queries, n_features), \
461
+ or (n_queries, n_indexed) if metric == 'precomputed'
462
+ Test samples.
463
+
464
+ Returns
465
+ -------
466
+ y : ndarray of shape (n_queries,) or (n_queries, n_outputs), \
467
+ dtype=double
468
+ Target values.
469
+ """
470
+ neigh_dist, neigh_ind = self.radius_neighbors(X)
471
+
472
+ weights = _get_weights(neigh_dist, self.weights)
473
+
474
+ _y = self._y
475
+ if _y.ndim == 1:
476
+ _y = _y.reshape((-1, 1))
477
+
478
+ empty_obs = np.full_like(_y[0], np.nan)
479
+
480
+ if weights is None:
481
+ y_pred = np.array(
482
+ [
483
+ np.mean(_y[ind, :], axis=0) if len(ind) else empty_obs
484
+ for (i, ind) in enumerate(neigh_ind)
485
+ ]
486
+ )
487
+
488
+ else:
489
+ y_pred = np.array(
490
+ [
491
+ (
492
+ np.average(_y[ind, :], axis=0, weights=weights[i])
493
+ if len(ind)
494
+ else empty_obs
495
+ )
496
+ for (i, ind) in enumerate(neigh_ind)
497
+ ]
498
+ )
499
+
500
+ if np.any(np.isnan(y_pred)):
501
+ empty_warning_msg = (
502
+ "One or more samples have no neighbors "
503
+ "within specified radius; predicting NaN."
504
+ )
505
+ warnings.warn(empty_warning_msg)
506
+
507
+ if self._y.ndim == 1:
508
+ y_pred = y_pred.ravel()
509
+
510
+ return y_pred
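A minimal sketch (plain NumPy, a toy rather than the estimators' actual code path) of how distance-weighted averaging over already-found neighbors produces a regression prediction:

import numpy as np

neigh_dist = np.array([[0.5, 1.5]])     # distances from one query to its 2 nearest neighbors
neigh_targets = np.array([[0.0, 1.0]])  # the neighbors' training targets
weights = 1.0 / neigh_dist              # 'distance' weighting: closer neighbors count more
y_pred = (neigh_targets * weights).sum(axis=1) / weights.sum(axis=1)
print(y_pred)  # about 0.25, pulled toward the closer neighbor whose target is 0.0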
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py ADDED
@@ -0,0 +1,175 @@
1
+ """Unsupervised nearest neighbors learner"""
2
+ from ..base import _fit_context
3
+ from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin
4
+
5
+
6
+ class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase):
7
+ """Unsupervised learner for implementing neighbor searches.
8
+
9
+ Read more in the :ref:`User Guide <unsupervised_neighbors>`.
10
+
11
+ .. versionadded:: 0.9
12
+
13
+ Parameters
14
+ ----------
15
+ n_neighbors : int, default=5
16
+ Number of neighbors to use by default for :meth:`kneighbors` queries.
17
+
18
+ radius : float, default=1.0
19
+ Range of parameter space to use by default for :meth:`radius_neighbors`
20
+ queries.
21
+
22
+ algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
23
+ Algorithm used to compute the nearest neighbors:
24
+
25
+ - 'ball_tree' will use :class:`BallTree`
26
+ - 'kd_tree' will use :class:`KDTree`
27
+ - 'brute' will use a brute-force search.
28
+ - 'auto' will attempt to decide the most appropriate algorithm
29
+ based on the values passed to :meth:`fit` method.
30
+
31
+ Note: fitting on sparse input will override the setting of
32
+ this parameter, using brute force.
33
+
34
+ leaf_size : int, default=30
35
+ Leaf size passed to BallTree or KDTree. This can affect the
36
+ speed of the construction and query, as well as the memory
37
+ required to store the tree. The optimal value depends on the
38
+ nature of the problem.
39
+
40
+ metric : str or callable, default='minkowski'
41
+ Metric to use for distance computation. Default is "minkowski", which
42
+ results in the standard Euclidean distance when p = 2. See the
43
+ documentation of `scipy.spatial.distance
44
+ <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
45
+ the metrics listed in
46
+ :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
47
+ values.
48
+
49
+ If metric is "precomputed", X is assumed to be a distance matrix and
50
+ must be square during fit. X may be a :term:`sparse graph`, in which
51
+ case only "nonzero" elements may be considered neighbors.
52
+
53
+ If metric is a callable function, it takes two arrays representing 1D
54
+ vectors as inputs and must return one value indicating the distance
55
+ between those vectors. This works for Scipy's metrics, but is less
56
+ efficient than passing the metric name as a string.
57
+
58
+ p : float (positive), default=2
59
+ Parameter for the Minkowski metric from
60
+ sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
61
+ equivalent to using manhattan_distance (l1), and euclidean_distance
62
+ (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
63
+
64
+ metric_params : dict, default=None
65
+ Additional keyword arguments for the metric function.
66
+
67
+ n_jobs : int, default=None
68
+ The number of parallel jobs to run for neighbors search.
69
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
70
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
71
+ for more details.
72
+
73
+ Attributes
74
+ ----------
75
+ effective_metric_ : str
76
+ Metric used to compute distances to neighbors.
77
+
78
+ effective_metric_params_ : dict
79
+ Parameters for the metric used to compute distances to neighbors.
80
+
81
+ n_features_in_ : int
82
+ Number of features seen during :term:`fit`.
83
+
84
+ .. versionadded:: 0.24
85
+
86
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
87
+ Names of features seen during :term:`fit`. Defined only when `X`
88
+ has feature names that are all strings.
89
+
90
+ .. versionadded:: 1.0
91
+
92
+ n_samples_fit_ : int
93
+ Number of samples in the fitted data.
94
+
95
+ See Also
96
+ --------
97
+ KNeighborsClassifier : Classifier implementing the k-nearest neighbors
98
+ vote.
99
+ RadiusNeighborsClassifier : Classifier implementing a vote among neighbors
100
+ within a given radius.
101
+ KNeighborsRegressor : Regression based on k-nearest neighbors.
102
+ RadiusNeighborsRegressor : Regression based on neighbors within a fixed
103
+ radius.
104
+ BallTree : Space partitioning data structure for organizing points in a
105
+ multi-dimensional space, used for nearest neighbor search.
106
+
107
+ Notes
108
+ -----
109
+ See :ref:`Nearest Neighbors <neighbors>` in the online documentation
110
+ for a discussion of the choice of ``algorithm`` and ``leaf_size``.
111
+
112
+ https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
113
+
114
+ Examples
115
+ --------
116
+ >>> import numpy as np
117
+ >>> from sklearn.neighbors import NearestNeighbors
118
+ >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
119
+ >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4)
120
+ >>> neigh.fit(samples)
121
+ NearestNeighbors(...)
122
+ >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
123
+ array([[2, 0]]...)
124
+ >>> nbrs = neigh.radius_neighbors(
125
+ ... [[0, 0, 1.3]], 0.4, return_distance=False
126
+ ... )
127
+ >>> np.asarray(nbrs[0][0])
128
+ array(2)
129
+ """
130
+
131
+ def __init__(
132
+ self,
133
+ *,
134
+ n_neighbors=5,
135
+ radius=1.0,
136
+ algorithm="auto",
137
+ leaf_size=30,
138
+ metric="minkowski",
139
+ p=2,
140
+ metric_params=None,
141
+ n_jobs=None,
142
+ ):
143
+ super().__init__(
144
+ n_neighbors=n_neighbors,
145
+ radius=radius,
146
+ algorithm=algorithm,
147
+ leaf_size=leaf_size,
148
+ metric=metric,
149
+ p=p,
150
+ metric_params=metric_params,
151
+ n_jobs=n_jobs,
152
+ )
153
+
154
+ @_fit_context(
155
+ # NearestNeighbors.metric is not validated yet
156
+ prefer_skip_nested_validation=False
157
+ )
158
+ def fit(self, X, y=None):
159
+ """Fit the nearest neighbors estimator from the training dataset.
160
+
161
+ Parameters
162
+ ----------
163
+ X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
164
+ (n_samples, n_samples) if metric='precomputed'
165
+ Training data.
166
+
167
+ y : Ignored
168
+ Not used, present for API consistency by convention.
169
+
170
+ Returns
171
+ -------
172
+ self : NearestNeighbors
173
+ The fitted nearest neighbors estimator.
174
+ """
175
+ return self._fit(X)
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_ball_tree.cpython-310.pyc ADDED
Binary file (6.19 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_kd_tree.cpython-310.pyc ADDED
Binary file (3.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_lof.cpython-310.pyc ADDED
Binary file (9.09 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nca.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/neighbors/tests/__pycache__/test_nearest_centroid.cpython-310.pyc ADDED
Binary file (4.7 kB). View file