applied-ai-018 committed on
Commit 0367fd6 · verified · 1 Parent(s): e5a8788

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/sklearn/utils/__init__.py +1299 -0
  2. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_arpack.py +30 -0
  3. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_available_if.py +93 -0
  4. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so +0 -0
  5. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_heap.pxd +14 -0
  6. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so +0 -0
  7. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_param_validation.py +905 -0
  8. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_pprint.py +463 -0
  9. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_response.py +298 -0
  10. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.pxd +104 -0
  11. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_sorting.pxd +9 -0
  12. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.cpython-310-x86_64-linux-gnu.so +0 -0
  13. llmeval-env/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd +12 -0
  14. llmeval-env/lib/python3.10/site-packages/sklearn/utils/discovery.py +265 -0
  15. llmeval-env/lib/python3.10/site-packages/sklearn/utils/graph.py +166 -0
  16. llmeval-env/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py +22 -0
  17. llmeval-env/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so +0 -0
  18. llmeval-env/lib/python3.10/site-packages/sklearn/utils/optimize.py +302 -0
  19. llmeval-env/lib/python3.10/site-packages/sklearn/utils/parallel.py +129 -0
  20. llmeval-env/lib/python3.10/site-packages/sklearn/utils/stats.py +69 -0
  21. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_multiclass.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_param_validation.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_plotting.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_seq_dataset.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_stats.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_testing.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_typedefs.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_utils.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_validation.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/sklearn/utils/__init__.py ADDED
@@ -0,0 +1,1299 @@
1
+ """
2
+ The :mod:`sklearn.utils` module includes various utilities.
3
+ """
4
+
5
+ import math
6
+ import numbers
7
+ import platform
8
+ import struct
9
+ import timeit
10
+ import warnings
11
+ from collections.abc import Sequence
12
+ from contextlib import contextmanager, suppress
13
+ from itertools import compress, islice
14
+
15
+ import numpy as np
16
+ from scipy.sparse import issparse
17
+
18
+ from .. import get_config
19
+ from ..exceptions import DataConversionWarning
20
+ from . import _joblib, metadata_routing
21
+ from ._bunch import Bunch
22
+ from ._estimator_html_repr import estimator_html_repr
23
+ from ._param_validation import Integral, Interval, validate_params
24
+ from .class_weight import compute_class_weight, compute_sample_weight
25
+ from .deprecation import deprecated
26
+ from .discovery import all_estimators
27
+ from .fixes import parse_version, threadpool_info
28
+ from .murmurhash import murmurhash3_32
29
+ from .validation import (
30
+ _is_arraylike_not_scalar,
31
+ _is_pandas_df,
32
+ _is_polars_df,
33
+ _use_interchange_protocol,
34
+ as_float_array,
35
+ assert_all_finite,
36
+ check_array,
37
+ check_consistent_length,
38
+ check_random_state,
39
+ check_scalar,
40
+ check_symmetric,
41
+ check_X_y,
42
+ column_or_1d,
43
+ indexable,
44
+ )
45
+
46
+ # Do not deprecate parallel_backend and register_parallel_backend as they are
47
+ # needed to tune `scikit-learn` behavior and have different effect if called
48
+ # from the vendored version or the site-packages version. The others are
49
+ # utilities that are independent of scikit-learn so they are not part of
50
+ # scikit-learn public API.
51
+ parallel_backend = _joblib.parallel_backend
52
+ register_parallel_backend = _joblib.register_parallel_backend
53
+
54
+ __all__ = [
55
+ "murmurhash3_32",
56
+ "as_float_array",
57
+ "assert_all_finite",
58
+ "check_array",
59
+ "check_random_state",
60
+ "compute_class_weight",
61
+ "compute_sample_weight",
62
+ "column_or_1d",
63
+ "check_consistent_length",
64
+ "check_X_y",
65
+ "check_scalar",
66
+ "indexable",
67
+ "check_symmetric",
68
+ "indices_to_mask",
69
+ "deprecated",
70
+ "parallel_backend",
71
+ "register_parallel_backend",
72
+ "resample",
73
+ "shuffle",
74
+ "check_matplotlib_support",
75
+ "all_estimators",
76
+ "DataConversionWarning",
77
+ "estimator_html_repr",
78
+ "Bunch",
79
+ "metadata_routing",
80
+ ]
81
+
82
+ IS_PYPY = platform.python_implementation() == "PyPy"
83
+ _IS_32BIT = 8 * struct.calcsize("P") == 32
84
+ _IS_WASM = platform.machine() in ["wasm32", "wasm64"]
85
+
86
+
87
+ def _in_unstable_openblas_configuration():
88
+ """Return True if in an unstable configuration for OpenBLAS"""
89
+
90
+ # Import libraries which might load OpenBLAS.
91
+ import numpy # noqa
92
+ import scipy # noqa
93
+
94
+ modules_info = threadpool_info()
95
+
96
+ open_blas_used = any(info["internal_api"] == "openblas" for info in modules_info)
97
+ if not open_blas_used:
98
+ return False
99
+
100
+ # OpenBLAS 0.3.16 fixed instability for arm64, see:
101
+ # https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58 # noqa
102
+ openblas_arm64_stable_version = parse_version("0.3.16")
103
+ for info in modules_info:
104
+ if info["internal_api"] != "openblas":
105
+ continue
106
+ openblas_version = info.get("version")
107
+ openblas_architecture = info.get("architecture")
108
+ if openblas_version is None or openblas_architecture is None:
109
+ # Cannot be sure that OpenBLAS is good enough. Assume unstable:
110
+ return True
111
+ if (
112
+ openblas_architecture == "neoversen1"
113
+ and parse_version(openblas_version) < openblas_arm64_stable_version
114
+ ):
115
+ # See discussions in https://github.com/numpy/numpy/issues/19411
116
+ return True
117
+ return False
118
+
119
+
120
+ @validate_params(
121
+ {
122
+ "X": ["array-like", "sparse matrix"],
123
+ "mask": ["array-like"],
124
+ },
125
+ prefer_skip_nested_validation=True,
126
+ )
127
+ def safe_mask(X, mask):
128
+ """Return a mask which is safe to use on X.
129
+
130
+ Parameters
131
+ ----------
132
+ X : {array-like, sparse matrix}
133
+ Data on which to apply mask.
134
+
135
+ mask : array-like
136
+ Mask to be used on X.
137
+
138
+ Returns
139
+ -------
140
+ mask : ndarray
141
+ Array that is safe to use on X.
142
+
143
+ Examples
144
+ --------
145
+ >>> from sklearn.utils import safe_mask
146
+ >>> from scipy.sparse import csr_matrix
147
+ >>> data = csr_matrix([[1], [2], [3], [4], [5]])
148
+ >>> condition = [False, True, True, False, True]
149
+ >>> mask = safe_mask(data, condition)
150
+ >>> data[mask].toarray()
151
+ array([[2],
152
+ [3],
153
+ [5]])
154
+ """
155
+ mask = np.asarray(mask)
156
+ if np.issubdtype(mask.dtype, np.signedinteger):
157
+ return mask
158
+
159
+ if hasattr(X, "toarray"):
160
+ ind = np.arange(mask.shape[0])
161
+ mask = ind[mask]
162
+ return mask
163
+
164
+
165
+ def axis0_safe_slice(X, mask, len_mask):
166
+ """Return a mask which is safer to use on X than safe_mask.
167
+
168
+ This mask is safer than safe_mask since it returns an
169
+ empty array, when a sparse matrix is sliced with a boolean mask
170
+ with all False, instead of raising an unhelpful error in older
171
+ versions of SciPy.
172
+
173
+ See: https://github.com/scipy/scipy/issues/5361
174
+
175
+ Also note that we can avoid doing the dot product by checking if
176
+ the len_mask is not zero in _huber_loss_and_gradient but this
177
+ is not going to be the bottleneck, since the number of outliers
178
+ and non_outliers are typically non-zero and it makes the code
179
+ tougher to follow.
180
+
181
+ Parameters
182
+ ----------
183
+ X : {array-like, sparse matrix}
184
+ Data on which to apply mask.
185
+
186
+ mask : ndarray
187
+ Mask to be used on X.
188
+
189
+ len_mask : int
190
+ The length of the mask.
191
+
192
+ Returns
193
+ -------
194
+ mask : ndarray
195
+ Array that is safe to use on X.
196
+ """
197
+ if len_mask != 0:
198
+ return X[safe_mask(X, mask), :]
199
+ return np.zeros(shape=(0, X.shape[1]))
200
+
201
+
202
+ def _array_indexing(array, key, key_dtype, axis):
203
+ """Index an array or scipy.sparse consistently across NumPy version."""
204
+ if issparse(array) and key_dtype == "bool":
205
+ key = np.asarray(key)
206
+ if isinstance(key, tuple):
207
+ key = list(key)
208
+ return array[key, ...] if axis == 0 else array[:, key]
209
+
210
+
211
+ def _pandas_indexing(X, key, key_dtype, axis):
212
+ """Index a pandas dataframe or a series."""
213
+ if _is_arraylike_not_scalar(key):
214
+ key = np.asarray(key)
215
+
216
+ if key_dtype == "int" and not (isinstance(key, slice) or np.isscalar(key)):
217
+ # using take() instead of iloc[] ensures the return value is a "proper"
218
+ # copy that will not raise SettingWithCopyWarning
219
+ return X.take(key, axis=axis)
220
+ else:
221
+ # check whether we should index with loc or iloc
222
+ indexer = X.iloc if key_dtype == "int" else X.loc
223
+ return indexer[:, key] if axis else indexer[key]
224
+
225
+
226
+ def _list_indexing(X, key, key_dtype):
227
+ """Index a Python list."""
228
+ if np.isscalar(key) or isinstance(key, slice):
229
+ # key is a slice or a scalar
230
+ return X[key]
231
+ if key_dtype == "bool":
232
+ # key is a boolean array-like
233
+ return list(compress(X, key))
234
+ # key is a integer array-like of key
235
+ return [X[idx] for idx in key]
236
+
237
+
238
+ def _polars_indexing(X, key, key_dtype, axis):
239
+ """Indexing X with polars interchange protocol."""
240
+ # Polars behavior is more consistent with lists
241
+ if isinstance(key, np.ndarray):
242
+ key = key.tolist()
243
+
244
+ if axis == 1:
245
+ return X[:, key]
246
+ else:
247
+ return X[key]
248
+
249
+
250
+ def _determine_key_type(key, accept_slice=True):
251
+ """Determine the data type of key.
252
+
253
+ Parameters
254
+ ----------
255
+ key : scalar, slice or array-like
256
+ The key from which we want to infer the data type.
257
+
258
+ accept_slice : bool, default=True
259
+ Whether or not to raise an error if the key is a slice.
260
+
261
+ Returns
262
+ -------
263
+ dtype : {'int', 'str', 'bool', None}
264
+ Returns the data type of key.
265
+ """
266
+ err_msg = (
267
+ "No valid specification of the columns. Only a scalar, list or "
268
+ "slice of all integers or all strings, or boolean mask is "
269
+ "allowed"
270
+ )
271
+
272
+ dtype_to_str = {int: "int", str: "str", bool: "bool", np.bool_: "bool"}
273
+ array_dtype_to_str = {
274
+ "i": "int",
275
+ "u": "int",
276
+ "b": "bool",
277
+ "O": "str",
278
+ "U": "str",
279
+ "S": "str",
280
+ }
281
+
282
+ if key is None:
283
+ return None
284
+ if isinstance(key, tuple(dtype_to_str.keys())):
285
+ try:
286
+ return dtype_to_str[type(key)]
287
+ except KeyError:
288
+ raise ValueError(err_msg)
289
+ if isinstance(key, slice):
290
+ if not accept_slice:
291
+ raise TypeError(
292
+ "Only array-like or scalar are supported. A Python slice was given."
293
+ )
294
+ if key.start is None and key.stop is None:
295
+ return None
296
+ key_start_type = _determine_key_type(key.start)
297
+ key_stop_type = _determine_key_type(key.stop)
298
+ if key_start_type is not None and key_stop_type is not None:
299
+ if key_start_type != key_stop_type:
300
+ raise ValueError(err_msg)
301
+ if key_start_type is not None:
302
+ return key_start_type
303
+ return key_stop_type
304
+ if isinstance(key, (list, tuple)):
305
+ unique_key = set(key)
306
+ key_type = {_determine_key_type(elt) for elt in unique_key}
307
+ if not key_type:
308
+ return None
309
+ if len(key_type) != 1:
310
+ raise ValueError(err_msg)
311
+ return key_type.pop()
312
+ if hasattr(key, "dtype"):
313
+ try:
314
+ return array_dtype_to_str[key.dtype.kind]
315
+ except KeyError:
316
+ raise ValueError(err_msg)
317
+ raise ValueError(err_msg)
318
+
319
+
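As a quick reference, a minimal sketch of how `_determine_key_type` classifies indexing keys (illustrative only, not part of the committed file; it assumes this vendored `sklearn.utils` is importable):

import numpy as np
from sklearn.utils import _determine_key_type

# Scalars, slices, lists and arrays are all mapped to 'int', 'str' or 'bool'.
assert _determine_key_type(3) == "int"
assert _determine_key_type(slice(0, 5)) == "int"
assert _determine_key_type(["a", "b"]) == "str"
assert _determine_key_type(np.array([True, False])) == "bool"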
320
+ def _safe_indexing(X, indices, *, axis=0):
321
+ """Return rows, items or columns of X using indices.
322
+
323
+ .. warning::
324
+
325
+ This utility is documented, but **private**. This means that
326
+ backward compatibility might be broken without any deprecation
327
+ cycle.
328
+
329
+ Parameters
330
+ ----------
331
+ X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
332
+ Data from which to sample rows, items or columns. `list` are only
333
+ supported when `axis=0`.
334
+ indices : bool, int, str, slice, array-like
335
+ - If `axis=0`, boolean and integer array-like, integer slice,
336
+ and scalar integer are supported.
337
+ - If `axis=1`:
338
+ - to select a single column, `indices` can be of `int` type for
339
+ all `X` types and `str` only for dataframe. The selected subset
340
+ will be 1D, unless `X` is a sparse matrix in which case it will
341
+ be 2D.
342
+ - to select multiples columns, `indices` can be one of the
343
+ following: `list`, `array`, `slice`. The type used in
344
+ these containers can be one of the following: `int`, 'bool' and
345
+ `str`. However, `str` is only supported when `X` is a dataframe.
346
+ The selected subset will be 2D.
347
+ axis : int, default=0
348
+ The axis along which `X` will be subsampled. `axis=0` will select
349
+ rows while `axis=1` will select columns.
350
+
351
+ Returns
352
+ -------
353
+ subset
354
+ Subset of X on axis 0 or 1.
355
+
356
+ Notes
357
+ -----
358
+ CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
359
+ not supported.
360
+
361
+ Examples
362
+ --------
363
+ >>> import numpy as np
364
+ >>> from sklearn.utils import _safe_indexing
365
+ >>> data = np.array([[1, 2], [3, 4], [5, 6]])
366
+ >>> _safe_indexing(data, 0, axis=0) # select the first row
367
+ array([1, 2])
368
+ >>> _safe_indexing(data, 0, axis=1) # select the first column
369
+ array([1, 3, 5])
370
+ """
371
+ if indices is None:
372
+ return X
373
+
374
+ if axis not in (0, 1):
375
+ raise ValueError(
376
+ "'axis' should be either 0 (to index rows) or 1 (to index "
377
+ " column). Got {} instead.".format(axis)
378
+ )
379
+
380
+ indices_dtype = _determine_key_type(indices)
381
+
382
+ if axis == 0 and indices_dtype == "str":
383
+ raise ValueError("String indexing is not supported with 'axis=0'")
384
+
385
+ if axis == 1 and isinstance(X, list):
386
+ raise ValueError("axis=1 is not supported for lists")
387
+
388
+ if axis == 1 and hasattr(X, "ndim") and X.ndim != 2:
389
+ raise ValueError(
390
+ "'X' should be a 2D NumPy array, 2D sparse matrix or pandas "
391
+ "dataframe when indexing the columns (i.e. 'axis=1'). "
392
+ "Got {} instead with {} dimension(s).".format(type(X), X.ndim)
393
+ )
394
+
395
+ if (
396
+ axis == 1
397
+ and indices_dtype == "str"
398
+ and not (_is_pandas_df(X) or _use_interchange_protocol(X))
399
+ ):
400
+ raise ValueError(
401
+ "Specifying the columns using strings is only supported for dataframes."
402
+ )
403
+
404
+ if hasattr(X, "iloc"):
405
+ # TODO: we should probably use _is_pandas_df(X) instead but this would
406
+ # require updating some tests such as test_train_test_split_mock_pandas.
407
+ return _pandas_indexing(X, indices, indices_dtype, axis=axis)
408
+ elif _is_polars_df(X):
409
+ return _polars_indexing(X, indices, indices_dtype, axis=axis)
410
+ elif hasattr(X, "shape"):
411
+ return _array_indexing(X, indices, indices_dtype, axis=axis)
412
+ else:
413
+ return _list_indexing(X, indices, indices_dtype)
414
+
415
+
416
+ def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
417
+ """Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
418
+
419
+ Parameters
420
+ ----------
421
+ X : {ndarray, sparse-matrix, dataframe}
422
+ Array to be modified. It is expected to be 2-dimensional.
423
+
424
+ values : ndarray
425
+ The values to be assigned to `X`.
426
+
427
+ row_indexer : array-like, dtype={int, bool}, default=None
428
+ A 1-dimensional array to select the rows of interest. If `None`, all
429
+ rows are selected.
430
+
431
+ column_indexer : array-like, dtype={int, bool}, default=None
432
+ A 1-dimensional array to select the columns of interest. If `None`, all
433
+ columns are selected.
434
+ """
435
+ row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
436
+ column_indexer = (
437
+ slice(None, None, None) if column_indexer is None else column_indexer
438
+ )
439
+
440
+ if hasattr(X, "iloc"): # pandas dataframe
441
+ with warnings.catch_warnings():
442
+ # pandas >= 1.5 raises a warning when using iloc to set values in a column
443
+ # that does not have the same type as the column being set. It happens
444
+ # for instance when setting a categorical column with a string.
445
+ # In the future the behavior won't change and the warning should disappear.
446
+ # TODO(1.3): check if the warning is still raised or remove the filter.
447
+ warnings.simplefilter("ignore", FutureWarning)
448
+ X.iloc[row_indexer, column_indexer] = values
449
+ else: # numpy array or sparse matrix
450
+ X[row_indexer, column_indexer] = values
451
+
452
+
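A minimal usage sketch of `_safe_assign` (illustrative, not part of the committed file; the same call works on a pandas DataFrame through `.iloc`):

import numpy as np
from sklearn.utils import _safe_assign

X = np.zeros((3, 2))
# Write 1.0 and 2.0 into column 1 of rows 0 and 2.
_safe_assign(X, np.array([1.0, 2.0]), row_indexer=[0, 2], column_indexer=[1])
# X is now [[0., 1.], [0., 0.], [0., 2.]]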
453
+ def _get_column_indices_for_bool_or_int(key, n_columns):
454
+ # Convert key into list of positive integer indexes
455
+ try:
456
+ idx = _safe_indexing(np.arange(n_columns), key)
457
+ except IndexError as e:
458
+ raise ValueError(
459
+ f"all features must be in [0, {n_columns - 1}] or [-{n_columns}, 0]"
460
+ ) from e
461
+ return np.atleast_1d(idx).tolist()
462
+
463
+
464
+ def _get_column_indices(X, key):
465
+ """Get feature column indices for input data X and key.
466
+
467
+ For accepted values of `key`, see the docstring of
468
+ :func:`_safe_indexing`.
469
+ """
470
+ key_dtype = _determine_key_type(key)
471
+ if _use_interchange_protocol(X):
472
+ return _get_column_indices_interchange(X.__dataframe__(), key, key_dtype)
473
+
474
+ n_columns = X.shape[1]
475
+ if isinstance(key, (list, tuple)) and not key:
476
+ # we get an empty list
477
+ return []
478
+ elif key_dtype in ("bool", "int"):
479
+ return _get_column_indices_for_bool_or_int(key, n_columns)
480
+ else:
481
+ try:
482
+ all_columns = X.columns
483
+ except AttributeError:
484
+ raise ValueError(
485
+ "Specifying the columns using strings is only supported for dataframes."
486
+ )
487
+ if isinstance(key, str):
488
+ columns = [key]
489
+ elif isinstance(key, slice):
490
+ start, stop = key.start, key.stop
491
+ if start is not None:
492
+ start = all_columns.get_loc(start)
493
+ if stop is not None:
494
+ # pandas indexing with strings is endpoint included
495
+ stop = all_columns.get_loc(stop) + 1
496
+ else:
497
+ stop = n_columns + 1
498
+ return list(islice(range(n_columns), start, stop))
499
+ else:
500
+ columns = list(key)
501
+
502
+ try:
503
+ column_indices = []
504
+ for col in columns:
505
+ col_idx = all_columns.get_loc(col)
506
+ if not isinstance(col_idx, numbers.Integral):
507
+ raise ValueError(
508
+ f"Selected columns, {columns}, are not unique in dataframe"
509
+ )
510
+ column_indices.append(col_idx)
511
+
512
+ except KeyError as e:
513
+ raise ValueError("A given column is not a column of the dataframe") from e
514
+
515
+ return column_indices
516
+
517
+
518
+ def _get_column_indices_interchange(X_interchange, key, key_dtype):
519
+ """Same as _get_column_indices but for X with __dataframe__ protocol."""
520
+
521
+ n_columns = X_interchange.num_columns()
522
+
523
+ if isinstance(key, (list, tuple)) and not key:
524
+ # we get an empty list
525
+ return []
526
+ elif key_dtype in ("bool", "int"):
527
+ return _get_column_indices_for_bool_or_int(key, n_columns)
528
+ else:
529
+ column_names = list(X_interchange.column_names())
530
+
531
+ if isinstance(key, slice):
532
+ if key.step not in [1, None]:
533
+ raise NotImplementedError("key.step must be 1 or None")
534
+ start, stop = key.start, key.stop
535
+ if start is not None:
536
+ start = column_names.index(start)
537
+
538
+ if stop is not None:
539
+ stop = column_names.index(stop) + 1
540
+ else:
541
+ stop = n_columns + 1
542
+ return list(islice(range(n_columns), start, stop))
543
+
544
+ selected_columns = [key] if np.isscalar(key) else key
545
+
546
+ try:
547
+ return [column_names.index(col) for col in selected_columns]
548
+ except ValueError as e:
549
+ raise ValueError("A given column is not a column of the dataframe") from e
550
+
551
+
552
+ @validate_params(
553
+ {
554
+ "replace": ["boolean"],
555
+ "n_samples": [Interval(numbers.Integral, 1, None, closed="left"), None],
556
+ "random_state": ["random_state"],
557
+ "stratify": ["array-like", None],
558
+ },
559
+ prefer_skip_nested_validation=True,
560
+ )
561
+ def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=None):
562
+ """Resample arrays or sparse matrices in a consistent way.
563
+
564
+ The default strategy implements one step of the bootstrapping
565
+ procedure.
566
+
567
+ Parameters
568
+ ----------
569
+ *arrays : sequence of array-like of shape (n_samples,) or \
570
+ (n_samples, n_outputs)
571
+ Indexable data-structures can be arrays, lists, dataframes or scipy
572
+ sparse matrices with consistent first dimension.
573
+
574
+ replace : bool, default=True
575
+ Implements resampling with replacement. If False, this will implement
576
+ (sliced) random permutations.
577
+
578
+ n_samples : int, default=None
579
+ Number of samples to generate. If left to None this is
580
+ automatically set to the first dimension of the arrays.
581
+ If replace is False it should not be larger than the length of
582
+ arrays.
583
+
584
+ random_state : int, RandomState instance or None, default=None
585
+ Determines random number generation for shuffling
586
+ the data.
587
+ Pass an int for reproducible results across multiple function calls.
588
+ See :term:`Glossary <random_state>`.
589
+
590
+ stratify : array-like of shape (n_samples,) or (n_samples, n_outputs), \
591
+ default=None
592
+ If not None, data is split in a stratified fashion, using this as
593
+ the class labels.
594
+
595
+ Returns
596
+ -------
597
+ resampled_arrays : sequence of array-like of shape (n_samples,) or \
598
+ (n_samples, n_outputs)
599
+ Sequence of resampled copies of the collections. The original arrays
600
+ are not impacted.
601
+
602
+ See Also
603
+ --------
604
+ shuffle : Shuffle arrays or sparse matrices in a consistent way.
605
+
606
+ Examples
607
+ --------
608
+ It is possible to mix sparse and dense arrays in the same run::
609
+
610
+ >>> import numpy as np
611
+ >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
612
+ >>> y = np.array([0, 1, 2])
613
+
614
+ >>> from scipy.sparse import coo_matrix
615
+ >>> X_sparse = coo_matrix(X)
616
+
617
+ >>> from sklearn.utils import resample
618
+ >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
619
+ >>> X
620
+ array([[1., 0.],
621
+ [2., 1.],
622
+ [1., 0.]])
623
+
624
+ >>> X_sparse
625
+ <3x2 sparse matrix of type '<... 'numpy.float64'>'
626
+ with 4 stored elements in Compressed Sparse Row format>
627
+
628
+ >>> X_sparse.toarray()
629
+ array([[1., 0.],
630
+ [2., 1.],
631
+ [1., 0.]])
632
+
633
+ >>> y
634
+ array([0, 1, 0])
635
+
636
+ >>> resample(y, n_samples=2, random_state=0)
637
+ array([0, 1])
638
+
639
+ Example using stratification::
640
+
641
+ >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]
642
+ >>> resample(y, n_samples=5, replace=False, stratify=y,
643
+ ... random_state=0)
644
+ [1, 1, 1, 0, 1]
645
+ """
646
+ max_n_samples = n_samples
647
+ random_state = check_random_state(random_state)
648
+
649
+ if len(arrays) == 0:
650
+ return None
651
+
652
+ first = arrays[0]
653
+ n_samples = first.shape[0] if hasattr(first, "shape") else len(first)
654
+
655
+ if max_n_samples is None:
656
+ max_n_samples = n_samples
657
+ elif (max_n_samples > n_samples) and (not replace):
658
+ raise ValueError(
659
+ "Cannot sample %d out of arrays with dim %d when replace is False"
660
+ % (max_n_samples, n_samples)
661
+ )
662
+
663
+ check_consistent_length(*arrays)
664
+
665
+ if stratify is None:
666
+ if replace:
667
+ indices = random_state.randint(0, n_samples, size=(max_n_samples,))
668
+ else:
669
+ indices = np.arange(n_samples)
670
+ random_state.shuffle(indices)
671
+ indices = indices[:max_n_samples]
672
+ else:
673
+ # Code adapted from StratifiedShuffleSplit()
674
+ y = check_array(stratify, ensure_2d=False, dtype=None)
675
+ if y.ndim == 2:
676
+ # for multi-label y, map each distinct row to a string repr
677
+ # using join because str(row) uses an ellipsis if len(row) > 1000
678
+ y = np.array([" ".join(row.astype("str")) for row in y])
679
+
680
+ classes, y_indices = np.unique(y, return_inverse=True)
681
+ n_classes = classes.shape[0]
682
+
683
+ class_counts = np.bincount(y_indices)
684
+
685
+ # Find the sorted list of instances for each class:
686
+ # (np.unique above performs a sort, so code is O(n logn) already)
687
+ class_indices = np.split(
688
+ np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
689
+ )
690
+
691
+ n_i = _approximate_mode(class_counts, max_n_samples, random_state)
692
+
693
+ indices = []
694
+
695
+ for i in range(n_classes):
696
+ indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace)
697
+ indices.extend(indices_i)
698
+
699
+ indices = random_state.permutation(indices)
700
+
701
+ # convert sparse matrices to CSR for row-based indexing
702
+ arrays = [a.tocsr() if issparse(a) else a for a in arrays]
703
+ resampled_arrays = [_safe_indexing(a, indices) for a in arrays]
704
+ if len(resampled_arrays) == 1:
705
+ # syntactic sugar for the unit argument case
706
+ return resampled_arrays[0]
707
+ else:
708
+ return resampled_arrays
709
+
710
+
711
+ def shuffle(*arrays, random_state=None, n_samples=None):
712
+ """Shuffle arrays or sparse matrices in a consistent way.
713
+
714
+ This is a convenience alias to ``resample(*arrays, replace=False)`` to do
715
+ random permutations of the collections.
716
+
717
+ Parameters
718
+ ----------
719
+ *arrays : sequence of indexable data-structures
720
+ Indexable data-structures can be arrays, lists, dataframes or scipy
721
+ sparse matrices with consistent first dimension.
722
+
723
+ random_state : int, RandomState instance or None, default=None
724
+ Determines random number generation for shuffling
725
+ the data.
726
+ Pass an int for reproducible results across multiple function calls.
727
+ See :term:`Glossary <random_state>`.
728
+
729
+ n_samples : int, default=None
730
+ Number of samples to generate. If left to None this is
731
+ automatically set to the first dimension of the arrays. It should
732
+ not be larger than the length of arrays.
733
+
734
+ Returns
735
+ -------
736
+ shuffled_arrays : sequence of indexable data-structures
737
+ Sequence of shuffled copies of the collections. The original arrays
738
+ are not impacted.
739
+
740
+ See Also
741
+ --------
742
+ resample : Resample arrays or sparse matrices in a consistent way.
743
+
744
+ Examples
745
+ --------
746
+ It is possible to mix sparse and dense arrays in the same run::
747
+
748
+ >>> import numpy as np
749
+ >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
750
+ >>> y = np.array([0, 1, 2])
751
+
752
+ >>> from scipy.sparse import coo_matrix
753
+ >>> X_sparse = coo_matrix(X)
754
+
755
+ >>> from sklearn.utils import shuffle
756
+ >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
757
+ >>> X
758
+ array([[0., 0.],
759
+ [2., 1.],
760
+ [1., 0.]])
761
+
762
+ >>> X_sparse
763
+ <3x2 sparse matrix of type '<... 'numpy.float64'>'
764
+ with 3 stored elements in Compressed Sparse Row format>
765
+
766
+ >>> X_sparse.toarray()
767
+ array([[0., 0.],
768
+ [2., 1.],
769
+ [1., 0.]])
770
+
771
+ >>> y
772
+ array([2, 1, 0])
773
+
774
+ >>> shuffle(y, n_samples=2, random_state=0)
775
+ array([0, 1])
776
+ """
777
+ return resample(
778
+ *arrays, replace=False, n_samples=n_samples, random_state=random_state
779
+ )
780
+
781
+
782
+ def safe_sqr(X, *, copy=True):
783
+ """Element wise squaring of array-likes and sparse matrices.
784
+
785
+ Parameters
786
+ ----------
787
+ X : {array-like, ndarray, sparse matrix}
788
+
789
+ copy : bool, default=True
790
+ Whether to create a copy of X and operate on it or to perform
791
+ inplace computation (default behaviour).
792
+
793
+ Returns
794
+ -------
795
+ X ** 2 : element wise square
796
+ Return the element-wise square of the input.
797
+
798
+ Examples
799
+ --------
800
+ >>> from sklearn.utils import safe_sqr
801
+ >>> safe_sqr([1, 2, 3])
802
+ array([1, 4, 9])
803
+ """
804
+ X = check_array(X, accept_sparse=["csr", "csc", "coo"], ensure_2d=False)
805
+ if issparse(X):
806
+ if copy:
807
+ X = X.copy()
808
+ X.data **= 2
809
+ else:
810
+ if copy:
811
+ X = X**2
812
+ else:
813
+ X **= 2
814
+ return X
815
+
816
+
817
+ def _chunk_generator(gen, chunksize):
818
+ """Chunk generator, ``gen`` into lists of length ``chunksize``. The last
819
+ chunk may have a length less than ``chunksize``."""
820
+ while True:
821
+ chunk = list(islice(gen, chunksize))
822
+ if chunk:
823
+ yield chunk
824
+ else:
825
+ return
826
+
827
+
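An illustrative example of `_chunk_generator` (not part of the committed file):

from sklearn.utils import _chunk_generator

chunks = list(_chunk_generator(iter(range(7)), chunksize=3))
# [[0, 1, 2], [3, 4, 5], [6]] -- the last chunk may be shorter than chunksize.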
828
+ @validate_params(
829
+ {
830
+ "n": [Interval(numbers.Integral, 1, None, closed="left")],
831
+ "batch_size": [Interval(numbers.Integral, 1, None, closed="left")],
832
+ "min_batch_size": [Interval(numbers.Integral, 0, None, closed="left")],
833
+ },
834
+ prefer_skip_nested_validation=True,
835
+ )
836
+ def gen_batches(n, batch_size, *, min_batch_size=0):
837
+ """Generator to create slices containing `batch_size` elements from 0 to `n`.
838
+
839
+ The last slice may contain less than `batch_size` elements, when
840
+ `batch_size` does not divide `n`.
841
+
842
+ Parameters
843
+ ----------
844
+ n : int
845
+ Size of the sequence.
846
+ batch_size : int
847
+ Number of elements in each batch.
848
+ min_batch_size : int, default=0
849
+ Minimum number of elements in each batch.
850
+
851
+ Yields
852
+ ------
853
+ slice of `batch_size` elements
854
+
855
+ See Also
856
+ --------
857
+ gen_even_slices: Generator to create n_packs slices going up to n.
858
+
859
+ Examples
860
+ --------
861
+ >>> from sklearn.utils import gen_batches
862
+ >>> list(gen_batches(7, 3))
863
+ [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
864
+ >>> list(gen_batches(6, 3))
865
+ [slice(0, 3, None), slice(3, 6, None)]
866
+ >>> list(gen_batches(2, 3))
867
+ [slice(0, 2, None)]
868
+ >>> list(gen_batches(7, 3, min_batch_size=0))
869
+ [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
870
+ >>> list(gen_batches(7, 3, min_batch_size=2))
871
+ [slice(0, 3, None), slice(3, 7, None)]
872
+ """
873
+ start = 0
874
+ for _ in range(int(n // batch_size)):
875
+ end = start + batch_size
876
+ if end + min_batch_size > n:
877
+ continue
878
+ yield slice(start, end)
879
+ start = end
880
+ if start < n:
881
+ yield slice(start, n)
882
+
883
+
884
+ @validate_params(
885
+ {
886
+ "n": [Interval(Integral, 1, None, closed="left")],
887
+ "n_packs": [Interval(Integral, 1, None, closed="left")],
888
+ "n_samples": [Interval(Integral, 1, None, closed="left"), None],
889
+ },
890
+ prefer_skip_nested_validation=True,
891
+ )
892
+ def gen_even_slices(n, n_packs, *, n_samples=None):
893
+ """Generator to create `n_packs` evenly spaced slices going up to `n`.
894
+
895
+ If `n_packs` does not divide `n`, except for the first `n % n_packs`
896
+ slices, remaining slices may contain fewer elements.
897
+
898
+ Parameters
899
+ ----------
900
+ n : int
901
+ Size of the sequence.
902
+ n_packs : int
903
+ Number of slices to generate.
904
+ n_samples : int, default=None
905
+ Number of samples. Pass `n_samples` when the slices are to be used for
906
+ sparse matrix indexing; slicing off-the-end raises an exception, while
907
+ it works for NumPy arrays.
908
+
909
+ Yields
910
+ ------
911
+ `slice` representing a set of indices from 0 to n.
912
+
913
+ See Also
914
+ --------
915
+ gen_batches: Generator to create slices containing batch_size elements
916
+ from 0 to n.
917
+
918
+ Examples
919
+ --------
920
+ >>> from sklearn.utils import gen_even_slices
921
+ >>> list(gen_even_slices(10, 1))
922
+ [slice(0, 10, None)]
923
+ >>> list(gen_even_slices(10, 10))
924
+ [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
925
+ >>> list(gen_even_slices(10, 5))
926
+ [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
927
+ >>> list(gen_even_slices(10, 3))
928
+ [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
929
+ """
930
+ start = 0
931
+ for pack_num in range(n_packs):
932
+ this_n = n // n_packs
933
+ if pack_num < n % n_packs:
934
+ this_n += 1
935
+ if this_n > 0:
936
+ end = start + this_n
937
+ if n_samples is not None:
938
+ end = min(n_samples, end)
939
+ yield slice(start, end, None)
940
+ start = end
941
+
942
+
943
+ def tosequence(x):
944
+ """Cast iterable x to a Sequence, avoiding a copy if possible.
945
+
946
+ Parameters
947
+ ----------
948
+ x : iterable
949
+ The iterable to be converted.
950
+
951
+ Returns
952
+ -------
953
+ x : Sequence
954
+ If `x` is a NumPy array, it returns it as a `ndarray`. If `x`
955
+ is a `Sequence`, `x` is returned as-is. If `x` is of any other
956
+ type, `x` is returned cast as a list.
957
+ """
958
+ if isinstance(x, np.ndarray):
959
+ return np.asarray(x)
960
+ elif isinstance(x, Sequence):
961
+ return x
962
+ else:
963
+ return list(x)
964
+
965
+
966
+ def _to_object_array(sequence):
967
+ """Convert sequence to a 1-D NumPy array of object dtype.
968
+
969
+ numpy.array constructor has a similar use but its output
970
+ is ambiguous. It can be 1-D NumPy array of object dtype if
971
+ the input is a ragged array, but if the input is a list of
972
+ equal length arrays, then the output is a 2D numpy.array.
973
+ _to_object_array solves this ambiguity by guaranteeing that
974
+ the output is a 1-D NumPy array of objects for any input.
975
+
976
+ Parameters
977
+ ----------
978
+ sequence : array-like of shape (n_elements,)
979
+ The sequence to be converted.
980
+
981
+ Returns
982
+ -------
983
+ out : ndarray of shape (n_elements,), dtype=object
984
+ The converted sequence into a 1-D NumPy array of object dtype.
985
+
986
+ Examples
987
+ --------
988
+ >>> import numpy as np
989
+ >>> from sklearn.utils import _to_object_array
990
+ >>> _to_object_array([np.array([0]), np.array([1])])
991
+ array([array([0]), array([1])], dtype=object)
992
+ >>> _to_object_array([np.array([0]), np.array([1, 2])])
993
+ array([array([0]), array([1, 2])], dtype=object)
994
+ >>> _to_object_array([np.array([0]), np.array([1, 2])])
995
+ array([array([0]), array([1, 2])], dtype=object)
996
+ """
997
+ out = np.empty(len(sequence), dtype=object)
998
+ out[:] = sequence
999
+ return out
1000
+
1001
+
1002
+ def indices_to_mask(indices, mask_length):
1003
+ """Convert list of indices to boolean mask.
1004
+
1005
+ Parameters
1006
+ ----------
1007
+ indices : list-like
1008
+ List of integers treated as indices.
1009
+ mask_length : int
1010
+ Length of boolean mask to be generated.
1011
+ This parameter must be greater than max(indices).
1012
+
1013
+ Returns
1014
+ -------
1015
+ mask : 1d boolean nd-array
1016
+ Boolean array that is True where indices are present, else False.
1017
+
1018
+ Examples
1019
+ --------
1020
+ >>> from sklearn.utils import indices_to_mask
1021
+ >>> indices = [1, 2 , 3, 4]
1022
+ >>> indices_to_mask(indices, 5)
1023
+ array([False, True, True, True, True])
1024
+ """
1025
+ if mask_length <= np.max(indices):
1026
+ raise ValueError("mask_length must be greater than max(indices)")
1027
+
1028
+ mask = np.zeros(mask_length, dtype=bool)
1029
+ mask[indices] = True
1030
+
1031
+ return mask
1032
+
1033
+
1034
+ def _message_with_time(source, message, time):
1035
+ """Create one line message for logging purposes.
1036
+
1037
+ Parameters
1038
+ ----------
1039
+ source : str
1040
+ String indicating the source or the reference of the message.
1041
+
1042
+ message : str
1043
+ Short message.
1044
+
1045
+ time : int
1046
+ Time in seconds.
1047
+ """
1048
+ start_message = "[%s] " % source
1049
+
1050
+ # adapted from joblib.logger.short_format_time without the Windows -.1s
1051
+ # adjustment
1052
+ if time > 60:
1053
+ time_str = "%4.1fmin" % (time / 60)
1054
+ else:
1055
+ time_str = " %5.1fs" % time
1056
+ end_message = " %s, total=%s" % (message, time_str)
1057
+ dots_len = 70 - len(start_message) - len(end_message)
1058
+ return "%s%s%s" % (start_message, dots_len * ".", end_message)
1059
+
1060
+
1061
+ @contextmanager
1062
+ def _print_elapsed_time(source, message=None):
1063
+ """Log elapsed time to stdout when the context is exited.
1064
+
1065
+ Parameters
1066
+ ----------
1067
+ source : str
1068
+ String indicating the source or the reference of the message.
1069
+
1070
+ message : str, default=None
1071
+ Short message. If None, nothing will be printed.
1072
+
1073
+ Returns
1074
+ -------
1075
+ context_manager
1076
+ Prints elapsed time upon exit if verbose.
1077
+ """
1078
+ if message is None:
1079
+ yield
1080
+ else:
1081
+ start = timeit.default_timer()
1082
+ yield
1083
+ print(_message_with_time(source, message, timeit.default_timer() - start))
1084
+
1085
+
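An illustrative use of `_print_elapsed_time` (not part of the committed file; the printed format comes from `_message_with_time` above):

import time
from sklearn.utils import _print_elapsed_time

with _print_elapsed_time("Pipeline", "(step 1 of 3) fit"):
    time.sleep(0.1)
# Prints something like: "[Pipeline] .......... (step 1 of 3) fit, total=   0.1s"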
1086
+ def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
1087
+ """Calculate how many rows can be processed within `working_memory`.
1088
+
1089
+ Parameters
1090
+ ----------
1091
+ row_bytes : int
1092
+ The expected number of bytes of memory that will be consumed
1093
+ during the processing of each row.
1094
+ max_n_rows : int, default=None
1095
+ The maximum return value.
1096
+ working_memory : int or float, default=None
1097
+ The number of rows to fit inside this number of MiB will be
1098
+ returned. When None (default), the value of
1099
+ ``sklearn.get_config()['working_memory']`` is used.
1100
+
1101
+ Returns
1102
+ -------
1103
+ int
1104
+ The number of rows which can be processed within `working_memory`.
1105
+
1106
+ Warns
1107
+ -----
1108
+ Issues a UserWarning if `row_bytes` exceeds `working_memory` MiB.
1109
+ """
1110
+
1111
+ if working_memory is None:
1112
+ working_memory = get_config()["working_memory"]
1113
+
1114
+ chunk_n_rows = int(working_memory * (2**20) // row_bytes)
1115
+ if max_n_rows is not None:
1116
+ chunk_n_rows = min(chunk_n_rows, max_n_rows)
1117
+ if chunk_n_rows < 1:
1118
+ warnings.warn(
1119
+ "Could not adhere to working_memory config. "
1120
+ "Currently %.0fMiB, %.0fMiB required."
1121
+ % (working_memory, np.ceil(row_bytes * 2**-20))
1122
+ )
1123
+ chunk_n_rows = 1
1124
+ return chunk_n_rows
1125
+
1126
+
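A small sketch of `get_chunk_n_rows` (not part of the committed file; it assumes the default `working_memory` configuration of 1024 MiB):

from sklearn.utils import get_chunk_n_rows

# 1024 MiB at 8 bytes per row would allow ~134 million rows per chunk,
# but max_n_rows caps the result.
n_rows = get_chunk_n_rows(row_bytes=8, max_n_rows=10_000)
assert n_rows == 10_000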
1127
+ def _is_pandas_na(x):
1128
+ """Test if x is pandas.NA.
1129
+
1130
+ We intentionally do not use this function to return `True` for `pd.NA` in
1131
+ `is_scalar_nan`, because estimators that support `pd.NA` are the exception
1132
+ rather than the rule at the moment. When `pd.NA` is more universally
1133
+ supported, we may reconsider this decision.
1134
+
1135
+ Parameters
1136
+ ----------
1137
+ x : any type
1138
+
1139
+ Returns
1140
+ -------
1141
+ boolean
1142
+ """
1143
+ with suppress(ImportError):
1144
+ from pandas import NA
1145
+
1146
+ return x is NA
1147
+
1148
+ return False
1149
+
1150
+
1151
+ def is_scalar_nan(x):
1152
+ """Test if x is NaN.
1153
+
1154
+ This function is meant to overcome the issue that np.isnan does not allow
1155
+ non-numerical types as input, and that np.nan is not float('nan').
1156
+
1157
+ Parameters
1158
+ ----------
1159
+ x : any type
1160
+ Any scalar value.
1161
+
1162
+ Returns
1163
+ -------
1164
+ bool
1165
+ Returns true if x is NaN, and false otherwise.
1166
+
1167
+ Examples
1168
+ --------
1169
+ >>> import numpy as np
1170
+ >>> from sklearn.utils import is_scalar_nan
1171
+ >>> is_scalar_nan(np.nan)
1172
+ True
1173
+ >>> is_scalar_nan(float("nan"))
1174
+ True
1175
+ >>> is_scalar_nan(None)
1176
+ False
1177
+ >>> is_scalar_nan("")
1178
+ False
1179
+ >>> is_scalar_nan([np.nan])
1180
+ False
1181
+ """
1182
+ return (
1183
+ not isinstance(x, numbers.Integral)
1184
+ and isinstance(x, numbers.Real)
1185
+ and math.isnan(x)
1186
+ )
1187
+
1188
+
1189
+ def _approximate_mode(class_counts, n_draws, rng):
1190
+ """Computes approximate mode of multivariate hypergeometric.
1191
+
1192
+ This is an approximation to the mode of the multivariate
1193
+ hypergeometric given by class_counts and n_draws.
1194
+ It shouldn't be off by more than one.
1195
+
1196
+ It is the most likely outcome of drawing n_draws many
1197
+ samples from the population given by class_counts.
1198
+
1199
+ Parameters
1200
+ ----------
1201
+ class_counts : ndarray of int
1202
+ Population per class.
1203
+ n_draws : int
1204
+ Number of draws (samples to draw) from the overall population.
1205
+ rng : random state
1206
+ Used to break ties.
1207
+
1208
+ Returns
1209
+ -------
1210
+ sampled_classes : ndarray of int
1211
+ Number of samples drawn from each class.
1212
+ np.sum(sampled_classes) == n_draws
1213
+
1214
+ Examples
1215
+ --------
1216
+ >>> import numpy as np
1217
+ >>> from sklearn.utils import _approximate_mode
1218
+ >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
1219
+ array([2, 1])
1220
+ >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
1221
+ array([3, 1])
1222
+ >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
1223
+ ... n_draws=2, rng=0)
1224
+ array([0, 1, 1, 0])
1225
+ >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
1226
+ ... n_draws=2, rng=42)
1227
+ array([1, 1, 0, 0])
1228
+ """
1229
+ rng = check_random_state(rng)
1230
+ # this computes a bad approximation to the mode of the
1231
+ # multivariate hypergeometric given by class_counts and n_draws
1232
+ continuous = class_counts / class_counts.sum() * n_draws
1233
+ # floored means we don't overshoot n_samples, but probably undershoot
1234
+ floored = np.floor(continuous)
1235
+ # we add samples according to how much "left over" probability
1236
+ # they had, until we arrive at n_samples
1237
+ need_to_add = int(n_draws - floored.sum())
1238
+ if need_to_add > 0:
1239
+ remainder = continuous - floored
1240
+ values = np.sort(np.unique(remainder))[::-1]
1241
+ # add according to remainder, but break ties
1242
+ # randomly to avoid biases
1243
+ for value in values:
1244
+ (inds,) = np.where(remainder == value)
1245
+ # if we need_to_add less than what's in inds
1246
+ # we draw randomly from them.
1247
+ # if we need to add more, we add them all and
1248
+ # go to the next value
1249
+ add_now = min(len(inds), need_to_add)
1250
+ inds = rng.choice(inds, size=add_now, replace=False)
1251
+ floored[inds] += 1
1252
+ need_to_add -= add_now
1253
+ if need_to_add == 0:
1254
+ break
1255
+ return floored.astype(int)
1256
+
1257
+
1258
+ def check_matplotlib_support(caller_name):
1259
+ """Raise ImportError with detailed error message if mpl is not installed.
1260
+
1261
+ Plot utilities like any of the Display's plotting functions should lazily import
1262
+ matplotlib and call this helper before any computation.
1263
+
1264
+ Parameters
1265
+ ----------
1266
+ caller_name : str
1267
+ The name of the caller that requires matplotlib.
1268
+ """
1269
+ try:
1270
+ import matplotlib # noqa
1271
+ except ImportError as e:
1272
+ raise ImportError(
1273
+ "{} requires matplotlib. You can install matplotlib with "
1274
+ "`pip install matplotlib`".format(caller_name)
1275
+ ) from e
1276
+
1277
+
1278
+ def check_pandas_support(caller_name):
1279
+ """Raise ImportError with detailed error message if pandas is not installed.
1280
+
1281
+ Plot utilities like :func:`fetch_openml` should lazily import
1282
+ pandas and call this helper before any computation.
1283
+
1284
+ Parameters
1285
+ ----------
1286
+ caller_name : str
1287
+ The name of the caller that requires pandas.
1288
+
1289
+ Returns
1290
+ -------
1291
+ pandas
1292
+ The pandas package.
1293
+ """
1294
+ try:
1295
+ import pandas # noqa
1296
+
1297
+ return pandas
1298
+ except ImportError as e:
1299
+ raise ImportError("{} requires pandas.".format(caller_name)) from e
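Two of this module's smaller helpers, `tosequence` and `axis0_safe_slice`, carry no doctest of their own; a minimal sketch of both (illustrative, not part of the committed file):

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils import axis0_safe_slice, tosequence

# tosequence leaves sequences alone and materialises other iterables as lists.
assert tosequence(x for x in range(3)) == [0, 1, 2]

# axis0_safe_slice returns an empty (0, n_features) array for an all-False mask
# instead of relying on sparse boolean slicing.
X = csr_matrix(np.arange(6).reshape(3, 2))
mask = np.array([False, False, False])
assert axis0_safe_slice(X, mask, mask.sum()).shape == (0, 2)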
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_arpack.py ADDED
@@ -0,0 +1,30 @@
+ from .validation import check_random_state
+
+
+ def _init_arpack_v0(size, random_state):
+     """Initialize the starting vector for iteration in ARPACK functions.
+
+     Initialize a ndarray with values sampled from the uniform distribution on
+     [-1, 1]. This initialization model has been chosen to be consistent with
+     the ARPACK one as another initialization can lead to convergence issues.
+
+     Parameters
+     ----------
+     size : int
+         The size of the eigenvalue vector to be initialized.
+
+     random_state : int, RandomState instance or None, default=None
+         The seed of the pseudo random number generator used to generate a
+         uniform distribution. If int, random_state is the seed used by the
+         random number generator; If RandomState instance, random_state is the
+         random number generator; If None, the random number generator is the
+         RandomState instance used by `np.random`.
+
+     Returns
+     -------
+     v0 : ndarray of shape (size,)
+         The initialized vector.
+     """
+     random_state = check_random_state(random_state)
+     v0 = random_state.uniform(-1, 1, size)
+     return v0
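A minimal usage sketch of `_init_arpack_v0` (illustrative, not part of the committed file):

from sklearn.utils._arpack import _init_arpack_v0

# Deterministic ARPACK starting vector of length 5 with entries in [-1, 1).
v0 = _init_arpack_v0(size=5, random_state=42)
assert v0.shape == (5,)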
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_available_if.py ADDED
@@ -0,0 +1,93 @@
+ from functools import update_wrapper, wraps
+ from types import MethodType
+
+
+ class _AvailableIfDescriptor:
+     """Implements a conditional property using the descriptor protocol.
+
+     Using this class to create a decorator will raise an ``AttributeError``
+     if check(self) returns a falsey value. Note that if check raises an error
+     this will also result in hasattr returning false.
+
+     See https://docs.python.org/3/howto/descriptor.html for an explanation of
+     descriptors.
+     """
+
+     def __init__(self, fn, check, attribute_name):
+         self.fn = fn
+         self.check = check
+         self.attribute_name = attribute_name
+
+         # update the docstring of the descriptor
+         update_wrapper(self, fn)
+
+     def _check(self, obj, owner):
+         attr_err_msg = (
+             f"This {repr(owner.__name__)} has no attribute {repr(self.attribute_name)}"
+         )
+         try:
+             check_result = self.check(obj)
+         except Exception as e:
+             raise AttributeError(attr_err_msg) from e
+
+         if not check_result:
+             raise AttributeError(attr_err_msg)
+
+     def __get__(self, obj, owner=None):
+         if obj is not None:
+             # delegate only on instances, not the classes.
+             # this is to allow access to the docstrings.
+             self._check(obj, owner=owner)
+             out = MethodType(self.fn, obj)
+
+         else:
+             # This makes it possible to use the decorated method as an unbound method,
+             # for instance when monkeypatching.
+             @wraps(self.fn)
+             def out(*args, **kwargs):
+                 self._check(args[0], owner=owner)
+                 return self.fn(*args, **kwargs)
+
+         return out
+
+
+ def available_if(check):
+     """An attribute that is available only if check returns a truthy value.
+
+     Parameters
+     ----------
+     check : callable
+         When passed the object with the decorated method, this should return
+         a truthy value if the attribute is available, and either return False
+         or raise an AttributeError if not available.
+
+     Returns
+     -------
+     callable
+         Callable makes the decorated method available if `check` returns
+         a truthy value, otherwise the decorated method is unavailable.
+
+     Examples
+     --------
+     >>> from sklearn.utils.metaestimators import available_if
+     >>> class HelloIfEven:
+     ...     def __init__(self, x):
+     ...         self.x = x
+     ...
+     ...     def _x_is_even(self):
+     ...         return self.x % 2 == 0
+     ...
+     ...     @available_if(_x_is_even)
+     ...     def say_hello(self):
+     ...         print("Hello")
+     ...
+     >>> obj = HelloIfEven(1)
+     >>> hasattr(obj, "say_hello")
+     False
+     >>> obj.x = 2
+     >>> hasattr(obj, "say_hello")
+     True
+     >>> obj.say_hello()
+     Hello
+     """
+     return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)
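Beyond the doctest above, a short sketch of how the descriptor behaves on instances versus the class (illustrative, not part of the committed file):

from sklearn.utils._available_if import available_if

class Model:
    def __init__(self, fitted):
        self.fitted = fitted

    @available_if(lambda self: self.fitted)
    def predict(self):
        return "prediction"

assert not hasattr(Model(fitted=False), "predict")   # check fails -> AttributeError
assert Model(fitted=True).predict() == "prediction"  # check passes
Model.predict  # class-level access returns the wrapped function (for monkeypatching)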
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (288 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_heap.pxd ADDED
@@ -0,0 +1,14 @@
+ # Heap routines, used in various Cython implementations.
+
+ from cython cimport floating
+
+ from ._typedefs cimport intp_t
+
+
+ cdef int heap_push(
+     floating* values,
+     intp_t* indices,
+     intp_t size,
+     floating val,
+     intp_t val_idx,
+ ) noexcept nogil
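heap_push is only declared here; its implementation lives in the companion Cython source. Conceptually it maintains a fixed-size max-heap so that the `size` smallest (value, index) pairs seen so far are retained. A rough pure-Python sketch of that idea (illustrative only, not the actual Cython code):

import heapq

def keep_k_smallest(values, k):
    """Track the k smallest values (and their indices) with a max-heap."""
    heap = []  # stores (-value, index) so the largest kept value sits at the root
    for idx, val in enumerate(values):
        if len(heap) < k:
            heapq.heappush(heap, (-val, idx))
        elif -heap[0][0] > val:
            heapq.heapreplace(heap, (-val, idx))
    return sorted((-neg, i) for neg, i in heap)

assert keep_k_smallest([5.0, 1.0, 4.0, 2.0, 3.0], k=3) == [(1.0, 1), (2.0, 3), (3.0, 4)]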
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (80.6 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_param_validation.py ADDED
@@ -0,0 +1,905 @@
1
+ import functools
2
+ import math
3
+ import operator
4
+ import re
5
+ from abc import ABC, abstractmethod
6
+ from collections.abc import Iterable
7
+ from inspect import signature
8
+ from numbers import Integral, Real
9
+
10
+ import numpy as np
11
+ from scipy.sparse import csr_matrix, issparse
12
+
13
+ from .._config import config_context, get_config
14
+ from .validation import _is_arraylike_not_scalar
15
+
16
+
17
+ class InvalidParameterError(ValueError, TypeError):
18
+ """Custom exception to be raised when the parameter of a class/method/function
19
+ does not have a valid type or value.
20
+ """
21
+
22
+ # Inherits from ValueError and TypeError to keep backward compatibility.
23
+
24
+
25
+ def validate_parameter_constraints(parameter_constraints, params, caller_name):
26
+ """Validate types and values of given parameters.
27
+
28
+ Parameters
29
+ ----------
30
+ parameter_constraints : dict or {"no_validation"}
31
+ If "no_validation", validation is skipped for this parameter.
32
+
33
+ If a dict, it must be a dictionary `param_name: list of constraints`.
34
+ A parameter is valid if it satisfies one of the constraints from the list.
35
+ Constraints can be:
36
+ - an Interval object, representing a continuous or discrete range of numbers
37
+ - the string "array-like"
38
+ - the string "sparse matrix"
39
+ - the string "random_state"
40
+ - callable
41
+ - None, meaning that None is a valid value for the parameter
42
+ - any type, meaning that any instance of this type is valid
43
+ - an Options object, representing a set of elements of a given type
44
+ - a StrOptions object, representing a set of strings
45
+ - the string "boolean"
46
+ - the string "verbose"
47
+ - the string "cv_object"
48
+ - the string "nan"
49
+ - a MissingValues object representing markers for missing values
50
+ - a HasMethods object, representing method(s) an object must have
51
+ - a Hidden object, representing a constraint not meant to be exposed to the user
52
+
53
+ params : dict
54
+ A dictionary `param_name: param_value`. The parameters to validate against the
55
+ constraints.
56
+
57
+ caller_name : str
58
+ The name of the estimator or function or method that called this function.
59
+ """
60
+ for param_name, param_val in params.items():
61
+ # We allow parameters to not have a constraint so that third party estimators
62
+ # can inherit from sklearn estimators without having to necessarily use the
63
+ # validation tools.
64
+ if param_name not in parameter_constraints:
65
+ continue
66
+
67
+ constraints = parameter_constraints[param_name]
68
+
69
+ if constraints == "no_validation":
70
+ continue
71
+
72
+ constraints = [make_constraint(constraint) for constraint in constraints]
73
+
74
+ for constraint in constraints:
75
+ if constraint.is_satisfied_by(param_val):
76
+ # this constraint is satisfied, no need to check further.
77
+ break
78
+ else:
79
+ # No constraint is satisfied, raise with an informative message.
80
+
81
+ # Ignore constraints that we don't want to expose in the error message,
82
+ # i.e. options that are for internal purpose or not officially supported.
83
+ constraints = [
84
+ constraint for constraint in constraints if not constraint.hidden
85
+ ]
86
+
87
+ if len(constraints) == 1:
88
+ constraints_str = f"{constraints[0]}"
89
+ else:
90
+ constraints_str = (
91
+ f"{', '.join([str(c) for c in constraints[:-1]])} or"
92
+ f" {constraints[-1]}"
93
+ )
94
+
95
+ raise InvalidParameterError(
96
+ f"The {param_name!r} parameter of {caller_name} must be"
97
+ f" {constraints_str}. Got {param_val!r} instead."
98
+ )
99
+
100
+
101
+ def make_constraint(constraint):
102
+ """Convert the constraint into the appropriate Constraint object.
103
+
104
+ Parameters
105
+ ----------
106
+ constraint : object
107
+ The constraint to convert.
108
+
109
+ Returns
110
+ -------
111
+ constraint : instance of _Constraint
112
+ The converted constraint.
113
+ """
114
+ if isinstance(constraint, str) and constraint == "array-like":
115
+ return _ArrayLikes()
116
+ if isinstance(constraint, str) and constraint == "sparse matrix":
117
+ return _SparseMatrices()
118
+ if isinstance(constraint, str) and constraint == "random_state":
119
+ return _RandomStates()
120
+ if constraint is callable:
121
+ return _Callables()
122
+ if constraint is None:
123
+ return _NoneConstraint()
124
+ if isinstance(constraint, type):
125
+ return _InstancesOf(constraint)
126
+ if isinstance(
127
+ constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)
128
+ ):
129
+ return constraint
130
+ if isinstance(constraint, str) and constraint == "boolean":
131
+ return _Booleans()
132
+ if isinstance(constraint, str) and constraint == "verbose":
133
+ return _VerboseHelper()
134
+ if isinstance(constraint, str) and constraint == "cv_object":
135
+ return _CVObjects()
136
+ if isinstance(constraint, Hidden):
137
+ constraint = make_constraint(constraint.constraint)
138
+ constraint.hidden = True
139
+ return constraint
140
+ if isinstance(constraint, str) and constraint == "nan":
141
+ return _NanConstraint()
142
+ raise ValueError(f"Unknown constraint type: {constraint}")
143
+
144
+
145
+ def validate_params(parameter_constraints, *, prefer_skip_nested_validation):
146
+ """Decorator to validate types and values of functions and methods.
147
+
148
+ Parameters
149
+ ----------
150
+ parameter_constraints : dict
151
+ A dictionary `param_name: list of constraints`. See the docstring of
152
+ `validate_parameter_constraints` for a description of the accepted constraints.
153
+
154
+ Note that the *args and **kwargs parameters are not validated and must not be
155
+ present in the parameter_constraints dictionary.
156
+
157
+ prefer_skip_nested_validation : bool
158
+ If True, the validation of parameters of inner estimators or functions
159
+ called by the decorated function will be skipped.
160
+
161
+ This is useful to avoid validating many times the parameters passed by the
162
+ user from the public facing API. It's also useful to avoid validating
163
+ parameters that we pass internally to inner functions that are guaranteed to
164
+ be valid by the test suite.
165
+
166
+ It should be set to True for most functions, except for those that receive
167
+ non-validated objects as parameters or that are just wrappers around classes
168
+ because they only perform a partial validation.
169
+
170
+ Returns
171
+ -------
172
+ decorated_function : function or method
173
+ The decorated function.
174
+ """
175
+
176
+ def decorator(func):
177
+ # The dict of parameter constraints is set as an attribute of the function
178
+ # to make it possible to dynamically introspect the constraints for
179
+ # automatic testing.
180
+ setattr(func, "_skl_parameter_constraints", parameter_constraints)
181
+
182
+ @functools.wraps(func)
183
+ def wrapper(*args, **kwargs):
184
+ global_skip_validation = get_config()["skip_parameter_validation"]
185
+ if global_skip_validation:
186
+ return func(*args, **kwargs)
187
+
188
+ func_sig = signature(func)
189
+
190
+ # Map *args/**kwargs to the function signature
191
+ params = func_sig.bind(*args, **kwargs)
192
+ params.apply_defaults()
193
+
194
+ # ignore self/cls and positional/keyword markers
195
+ to_ignore = [
196
+ p.name
197
+ for p in func_sig.parameters.values()
198
+ if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
199
+ ]
200
+ to_ignore += ["self", "cls"]
201
+ params = {k: v for k, v in params.arguments.items() if k not in to_ignore}
202
+
203
+ validate_parameter_constraints(
204
+ parameter_constraints, params, caller_name=func.__qualname__
205
+ )
206
+
207
+ try:
208
+ with config_context(
209
+ skip_parameter_validation=(
210
+ prefer_skip_nested_validation or global_skip_validation
211
+ )
212
+ ):
213
+ return func(*args, **kwargs)
214
+ except InvalidParameterError as e:
215
+ # When the function is just a wrapper around an estimator, we allow
216
+ # the function to delegate validation to the estimator, but we replace
217
+ # the name of the estimator by the name of the function in the error
218
+ # message to avoid confusion.
219
+ msg = re.sub(
220
+ r"parameter of \w+ must be",
221
+ f"parameter of {func.__qualname__} must be",
222
+ str(e),
223
+ )
224
+ raise InvalidParameterError(msg) from e
225
+
226
+ return wrapper
227
+
228
+ return decorator
229
+
230
+
231
+ class RealNotInt(Real):
232
+ """A type that represents reals that are not instances of int.
233
+
234
+ Behaves like float, but also works with values extracted from numpy arrays.
235
+ isinstance(1, RealNotInt) -> False
236
+ isinstance(1.0, RealNotInt) -> True
237
+ """
238
+
239
+
240
+ RealNotInt.register(float)
241
+
242
+
243
+ def _type_name(t):
244
+ """Convert type into human readable string."""
245
+ module = t.__module__
246
+ qualname = t.__qualname__
247
+ if module == "builtins":
248
+ return qualname
249
+ elif t == Real:
250
+ return "float"
251
+ elif t == Integral:
252
+ return "int"
253
+ return f"{module}.{qualname}"
254
+
255
+
256
+ class _Constraint(ABC):
257
+ """Base class for the constraint objects."""
258
+
259
+ def __init__(self):
260
+ self.hidden = False
261
+
262
+ @abstractmethod
263
+ def is_satisfied_by(self, val):
264
+ """Whether or not a value satisfies the constraint.
265
+
266
+ Parameters
267
+ ----------
268
+ val : object
269
+ The value to check.
270
+
271
+ Returns
272
+ -------
273
+ is_satisfied : bool
274
+ Whether or not the constraint is satisfied by this value.
275
+ """
276
+
277
+ @abstractmethod
278
+ def __str__(self):
279
+ """A human readable representational string of the constraint."""
280
+
281
+
282
+ class _InstancesOf(_Constraint):
283
+ """Constraint representing instances of a given type.
284
+
285
+ Parameters
286
+ ----------
287
+ type : type
288
+ The valid type.
289
+ """
290
+
291
+ def __init__(self, type):
292
+ super().__init__()
293
+ self.type = type
294
+
295
+ def is_satisfied_by(self, val):
296
+ return isinstance(val, self.type)
297
+
298
+ def __str__(self):
299
+ return f"an instance of {_type_name(self.type)!r}"
300
+
301
+
302
+ class _NoneConstraint(_Constraint):
303
+ """Constraint representing the None singleton."""
304
+
305
+ def is_satisfied_by(self, val):
306
+ return val is None
307
+
308
+ def __str__(self):
309
+ return "None"
310
+
311
+
312
+ class _NanConstraint(_Constraint):
313
+ """Constraint representing the indicator `np.nan`."""
314
+
315
+ def is_satisfied_by(self, val):
316
+ return (
317
+ not isinstance(val, Integral) and isinstance(val, Real) and math.isnan(val)
318
+ )
319
+
320
+ def __str__(self):
321
+ return "numpy.nan"
322
+
323
+
324
+ class _PandasNAConstraint(_Constraint):
325
+ """Constraint representing the indicator `pd.NA`."""
326
+
327
+ def is_satisfied_by(self, val):
328
+ try:
329
+ import pandas as pd
330
+
331
+ return isinstance(val, type(pd.NA)) and pd.isna(val)
332
+ except ImportError:
333
+ return False
334
+
335
+ def __str__(self):
336
+ return "pandas.NA"
337
+
338
+
339
+ class Options(_Constraint):
340
+ """Constraint representing a finite set of instances of a given type.
341
+
342
+ Parameters
343
+ ----------
344
+ type : type
345
+
346
+ options : set
347
+ The set of valid scalars.
348
+
349
+ deprecated : set or None, default=None
350
+ A subset of the `options` to mark as deprecated in the string
351
+ representation of the constraint.
352
+ """
353
+
354
+ def __init__(self, type, options, *, deprecated=None):
355
+ super().__init__()
356
+ self.type = type
357
+ self.options = options
358
+ self.deprecated = deprecated or set()
359
+
360
+ if self.deprecated - self.options:
361
+ raise ValueError("The deprecated options must be a subset of the options.")
362
+
363
+ def is_satisfied_by(self, val):
364
+ return isinstance(val, self.type) and val in self.options
365
+
366
+ def _mark_if_deprecated(self, option):
367
+ """Add a deprecated mark to an option if needed."""
368
+ option_str = f"{option!r}"
369
+ if option in self.deprecated:
370
+ option_str = f"{option_str} (deprecated)"
371
+ return option_str
372
+
373
+ def __str__(self):
374
+ options_str = (
375
+ f"{', '.join([self._mark_if_deprecated(o) for o in self.options])}"
376
+ )
377
+ return f"a {_type_name(self.type)} among {{{options_str}}}"
378
+
379
+
380
+ class StrOptions(Options):
381
+ """Constraint representing a finite set of strings.
382
+
383
+ Parameters
384
+ ----------
385
+ options : set of str
386
+ The set of valid strings.
387
+
388
+ deprecated : set of str or None, default=None
389
+ A subset of the `options` to mark as deprecated in the string
390
+ representation of the constraint.
391
+ """
392
+
393
+ def __init__(self, options, *, deprecated=None):
394
+ super().__init__(type=str, options=options, deprecated=deprecated)
395
+
396
+
397
+ class Interval(_Constraint):
398
+ """Constraint representing a typed interval.
399
+
400
+ Parameters
401
+ ----------
402
+ type : {numbers.Integral, numbers.Real, RealNotInt}
403
+ The set of numbers in which to set the interval.
404
+
405
+ If RealNotInt, only reals that don't have the integer type
406
+ are allowed. For example 1.0 is allowed but 1 is not.
407
+
408
+ left : float or int or None
409
+ The left bound of the interval. None means left bound is -∞.
410
+
411
+ right : float, int or None
412
+ The right bound of the interval. None means right bound is +∞.
413
+
414
+ closed : {"left", "right", "both", "neither"}
415
+ Whether the interval is open or closed. Possible choices are:
416
+
417
+ - `"left"`: the interval is closed on the left and open on the right.
418
+ It is equivalent to the interval `[ left, right )`.
419
+ - `"right"`: the interval is closed on the right and open on the left.
420
+ It is equivalent to the interval `( left, right ]`.
421
+ - `"both"`: the interval is closed.
422
+ It is equivalent to the interval `[ left, right ]`.
423
+ - `"neither"`: the interval is open.
424
+ It is equivalent to the interval `( left, right )`.
425
+
426
+ Notes
427
+ -----
428
+ Setting a bound to `None` and setting the interval closed is valid. For instance,
429
+ strictly speaking, `Interval(Real, 0, None, closed="both")` corresponds to
430
+ `[0, +∞) U {+∞}`.
431
+ """
432
+
433
+ def __init__(self, type, left, right, *, closed):
434
+ super().__init__()
435
+ self.type = type
436
+ self.left = left
437
+ self.right = right
438
+ self.closed = closed
439
+
440
+ self._check_params()
441
+
442
+ def _check_params(self):
443
+ if self.type not in (Integral, Real, RealNotInt):
444
+ raise ValueError(
445
+ "type must be either numbers.Integral, numbers.Real or RealNotInt."
446
+ f" Got {self.type} instead."
447
+ )
448
+
449
+ if self.closed not in ("left", "right", "both", "neither"):
450
+ raise ValueError(
451
+ "closed must be either 'left', 'right', 'both' or 'neither'. "
452
+ f"Got {self.closed} instead."
453
+ )
454
+
455
+ if self.type is Integral:
456
+ suffix = "for an interval over the integers."
457
+ if self.left is not None and not isinstance(self.left, Integral):
458
+ raise TypeError(f"Expecting left to be an int {suffix}")
459
+ if self.right is not None and not isinstance(self.right, Integral):
460
+ raise TypeError(f"Expecting right to be an int {suffix}")
461
+ if self.left is None and self.closed in ("left", "both"):
462
+ raise ValueError(
463
+ f"left can't be None when closed == {self.closed} {suffix}"
464
+ )
465
+ if self.right is None and self.closed in ("right", "both"):
466
+ raise ValueError(
467
+ f"right can't be None when closed == {self.closed} {suffix}"
468
+ )
469
+ else:
470
+ if self.left is not None and not isinstance(self.left, Real):
471
+ raise TypeError("Expecting left to be a real number.")
472
+ if self.right is not None and not isinstance(self.right, Real):
473
+ raise TypeError("Expecting right to be a real number.")
474
+
475
+ if self.right is not None and self.left is not None and self.right <= self.left:
476
+ raise ValueError(
477
+ f"right can't be less than left. Got left={self.left} and "
478
+ f"right={self.right}"
479
+ )
480
+
481
+ def __contains__(self, val):
482
+ if not isinstance(val, Integral) and np.isnan(val):
483
+ return False
484
+
485
+ left_cmp = operator.lt if self.closed in ("left", "both") else operator.le
486
+ right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge
487
+
488
+ left = -np.inf if self.left is None else self.left
489
+ right = np.inf if self.right is None else self.right
490
+
491
+ if left_cmp(val, left):
492
+ return False
493
+ if right_cmp(val, right):
494
+ return False
495
+ return True
496
+
497
+ def is_satisfied_by(self, val):
498
+ if not isinstance(val, self.type):
499
+ return False
500
+
501
+ return val in self
502
+
503
+ def __str__(self):
504
+ type_str = "an int" if self.type is Integral else "a float"
505
+ left_bracket = "[" if self.closed in ("left", "both") else "("
506
+ left_bound = "-inf" if self.left is None else self.left
507
+ right_bound = "inf" if self.right is None else self.right
508
+ right_bracket = "]" if self.closed in ("right", "both") else ")"
509
+
510
+ # better repr if the bounds were given as integers
511
+ if not self.type == Integral and isinstance(self.left, Real):
512
+ left_bound = float(left_bound)
513
+ if not self.type == Integral and isinstance(self.right, Real):
514
+ right_bound = float(right_bound)
515
+
516
+ return (
517
+ f"{type_str} in the range "
518
+ f"{left_bracket}{left_bound}, {right_bound}{right_bracket}"
519
+ )
520
+
521
+
522
+ class _ArrayLikes(_Constraint):
523
+ """Constraint representing array-likes"""
524
+
525
+ def is_satisfied_by(self, val):
526
+ return _is_arraylike_not_scalar(val)
527
+
528
+ def __str__(self):
529
+ return "an array-like"
530
+
531
+
532
+ class _SparseMatrices(_Constraint):
533
+ """Constraint representing sparse matrices."""
534
+
535
+ def is_satisfied_by(self, val):
536
+ return issparse(val)
537
+
538
+ def __str__(self):
539
+ return "a sparse matrix"
540
+
541
+
542
+ class _Callables(_Constraint):
543
+ """Constraint representing callables."""
544
+
545
+ def is_satisfied_by(self, val):
546
+ return callable(val)
547
+
548
+ def __str__(self):
549
+ return "a callable"
550
+
551
+
552
+ class _RandomStates(_Constraint):
553
+ """Constraint representing random states.
554
+
555
+ Convenience class for
556
+ [Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None]
557
+ """
558
+
559
+ def __init__(self):
560
+ super().__init__()
561
+ self._constraints = [
562
+ Interval(Integral, 0, 2**32 - 1, closed="both"),
563
+ _InstancesOf(np.random.RandomState),
564
+ _NoneConstraint(),
565
+ ]
566
+
567
+ def is_satisfied_by(self, val):
568
+ return any(c.is_satisfied_by(val) for c in self._constraints)
569
+
570
+ def __str__(self):
571
+ return (
572
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
573
+ f" {self._constraints[-1]}"
574
+ )
575
+
576
+
577
+ class _Booleans(_Constraint):
578
+ """Constraint representing boolean likes.
579
+
580
+ Convenience class for
581
+ [bool, np.bool_, Integral (deprecated)]
582
+ """
583
+
584
+ def __init__(self):
585
+ super().__init__()
586
+ self._constraints = [
587
+ _InstancesOf(bool),
588
+ _InstancesOf(np.bool_),
589
+ ]
590
+
591
+ def is_satisfied_by(self, val):
592
+ return any(c.is_satisfied_by(val) for c in self._constraints)
593
+
594
+ def __str__(self):
595
+ return (
596
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
597
+ f" {self._constraints[-1]}"
598
+ )
599
+
600
+
601
+ class _VerboseHelper(_Constraint):
602
+ """Helper constraint for the verbose parameter.
603
+
604
+ Convenience class for
605
+ [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_]
606
+ """
607
+
608
+ def __init__(self):
609
+ super().__init__()
610
+ self._constraints = [
611
+ Interval(Integral, 0, None, closed="left"),
612
+ _InstancesOf(bool),
613
+ _InstancesOf(np.bool_),
614
+ ]
615
+
616
+ def is_satisfied_by(self, val):
617
+ return any(c.is_satisfied_by(val) for c in self._constraints)
618
+
619
+ def __str__(self):
620
+ return (
621
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
622
+ f" {self._constraints[-1]}"
623
+ )
624
+
625
+
626
+ class MissingValues(_Constraint):
627
+ """Helper constraint for the `missing_values` parameters.
628
+
629
+ Convenience for
630
+ [
631
+ Integral,
632
+ Interval(Real, None, None, closed="both"),
633
+ str, # when numeric_only is False
634
+ None, # when numeric_only is False
635
+ _NanConstraint(),
636
+ _PandasNAConstraint(),
637
+ ]
638
+
639
+ Parameters
640
+ ----------
641
+ numeric_only : bool, default=False
642
+ Whether to consider only numeric missing value markers.
643
+
644
+ """
645
+
646
+ def __init__(self, numeric_only=False):
647
+ super().__init__()
648
+
649
+ self.numeric_only = numeric_only
650
+
651
+ self._constraints = [
652
+ _InstancesOf(Integral),
653
+ # we use an interval of Real to ignore np.nan that has its own constraint
654
+ Interval(Real, None, None, closed="both"),
655
+ _NanConstraint(),
656
+ _PandasNAConstraint(),
657
+ ]
658
+ if not self.numeric_only:
659
+ self._constraints.extend([_InstancesOf(str), _NoneConstraint()])
660
+
661
+ def is_satisfied_by(self, val):
662
+ return any(c.is_satisfied_by(val) for c in self._constraints)
663
+
664
+ def __str__(self):
665
+ return (
666
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
667
+ f" {self._constraints[-1]}"
668
+ )
669
+
670
+
671
+ class HasMethods(_Constraint):
672
+ """Constraint representing objects that expose specific methods.
673
+
674
+ It is useful for parameters following a protocol and where we don't want to impose
675
+ an affiliation to a specific module or class.
676
+
677
+ Parameters
678
+ ----------
679
+ methods : str or list of str
680
+ The method(s) that the object is expected to expose.
681
+ """
682
+
683
+ @validate_params(
684
+ {"methods": [str, list]},
685
+ prefer_skip_nested_validation=True,
686
+ )
687
+ def __init__(self, methods):
688
+ super().__init__()
689
+ if isinstance(methods, str):
690
+ methods = [methods]
691
+ self.methods = methods
692
+
693
+ def is_satisfied_by(self, val):
694
+ return all(callable(getattr(val, method, None)) for method in self.methods)
695
+
696
+ def __str__(self):
697
+ if len(self.methods) == 1:
698
+ methods = f"{self.methods[0]!r}"
699
+ else:
700
+ methods = (
701
+ f"{', '.join([repr(m) for m in self.methods[:-1]])} and"
702
+ f" {self.methods[-1]!r}"
703
+ )
704
+ return f"an object implementing {methods}"
705
+
706
+
707
+ class _IterablesNotString(_Constraint):
708
+ """Constraint representing iterables that are not strings."""
709
+
710
+ def is_satisfied_by(self, val):
711
+ return isinstance(val, Iterable) and not isinstance(val, str)
712
+
713
+ def __str__(self):
714
+ return "an iterable"
715
+
716
+
717
+ class _CVObjects(_Constraint):
718
+ """Constraint representing cv objects.
719
+
720
+ Convenient class for
721
+ [
722
+ Interval(Integral, 2, None, closed="left"),
723
+ HasMethods(["split", "get_n_splits"]),
724
+ _IterablesNotString(),
725
+ None,
726
+ ]
727
+ """
728
+
729
+ def __init__(self):
730
+ super().__init__()
731
+ self._constraints = [
732
+ Interval(Integral, 2, None, closed="left"),
733
+ HasMethods(["split", "get_n_splits"]),
734
+ _IterablesNotString(),
735
+ _NoneConstraint(),
736
+ ]
737
+
738
+ def is_satisfied_by(self, val):
739
+ return any(c.is_satisfied_by(val) for c in self._constraints)
740
+
741
+ def __str__(self):
742
+ return (
743
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
744
+ f" {self._constraints[-1]}"
745
+ )
746
+
747
+
748
+ class Hidden:
749
+ """Class encapsulating a constraint not meant to be exposed to the user.
750
+
751
+ Parameters
752
+ ----------
753
+ constraint : str or _Constraint instance
754
+ The constraint to be used internally.
755
+ """
756
+
757
+ def __init__(self, constraint):
758
+ self.constraint = constraint
759
+
760
+
761
+ def generate_invalid_param_val(constraint):
762
+ """Return a value that does not satisfy the constraint.
763
+
764
+ Raises a NotImplementedError if there exists no invalid value for this constraint.
765
+
766
+ This is only useful for testing purpose.
767
+
768
+ Parameters
769
+ ----------
770
+ constraint : _Constraint instance
771
+ The constraint to generate a value for.
772
+
773
+ Returns
774
+ -------
775
+ val : object
776
+ A value that does not satisfy the constraint.
777
+ """
778
+ if isinstance(constraint, StrOptions):
779
+ return f"not {' or '.join(constraint.options)}"
780
+
781
+ if isinstance(constraint, MissingValues):
782
+ return np.array([1, 2, 3])
783
+
784
+ if isinstance(constraint, _VerboseHelper):
785
+ return -1
786
+
787
+ if isinstance(constraint, HasMethods):
788
+ return type("HasNotMethods", (), {})()
789
+
790
+ if isinstance(constraint, _IterablesNotString):
791
+ return "a string"
792
+
793
+ if isinstance(constraint, _CVObjects):
794
+ return "not a cv object"
795
+
796
+ if isinstance(constraint, Interval) and constraint.type is Integral:
797
+ if constraint.left is not None:
798
+ return constraint.left - 1
799
+ if constraint.right is not None:
800
+ return constraint.right + 1
801
+
802
+ # There's no integer outside (-inf, +inf)
803
+ raise NotImplementedError
804
+
805
+ if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt):
806
+ if constraint.left is not None:
807
+ return constraint.left - 1e-6
808
+ if constraint.right is not None:
809
+ return constraint.right + 1e-6
810
+
811
+ # bounds are -inf, +inf
812
+ if constraint.closed in ("right", "neither"):
813
+ return -np.inf
814
+ if constraint.closed in ("left", "neither"):
815
+ return np.inf
816
+
817
+ # interval is [-inf, +inf]
818
+ return np.nan
819
+
820
+ raise NotImplementedError
821
+
822
+
823
+ def generate_valid_param(constraint):
824
+ """Return a value that does satisfy a constraint.
825
+
826
+ This is only useful for testing purpose.
827
+
828
+ Parameters
829
+ ----------
830
+ constraint : Constraint instance
831
+ The constraint to generate a value for.
832
+
833
+ Returns
834
+ -------
835
+ val : object
836
+ A value that does satisfy the constraint.
837
+ """
838
+ if isinstance(constraint, _ArrayLikes):
839
+ return np.array([1, 2, 3])
840
+
841
+ if isinstance(constraint, _SparseMatrices):
842
+ return csr_matrix([[0, 1], [1, 0]])
843
+
844
+ if isinstance(constraint, _RandomStates):
845
+ return np.random.RandomState(42)
846
+
847
+ if isinstance(constraint, _Callables):
848
+ return lambda x: x
849
+
850
+ if isinstance(constraint, _NoneConstraint):
851
+ return None
852
+
853
+ if isinstance(constraint, _InstancesOf):
854
+ if constraint.type is np.ndarray:
855
+ # special case for ndarray since it can't be instantiated without arguments
856
+ return np.array([1, 2, 3])
857
+
858
+ if constraint.type in (Integral, Real):
859
+ # special case for Integral and Real since they are abstract classes
860
+ return 1
861
+
862
+ return constraint.type()
863
+
864
+ if isinstance(constraint, _Booleans):
865
+ return True
866
+
867
+ if isinstance(constraint, _VerboseHelper):
868
+ return 1
869
+
870
+ if isinstance(constraint, MissingValues) and constraint.numeric_only:
871
+ return np.nan
872
+
873
+ if isinstance(constraint, MissingValues) and not constraint.numeric_only:
874
+ return "missing"
875
+
876
+ if isinstance(constraint, HasMethods):
877
+ return type(
878
+ "ValidHasMethods", (), {m: lambda self: None for m in constraint.methods}
879
+ )()
880
+
881
+ if isinstance(constraint, _IterablesNotString):
882
+ return [1, 2, 3]
883
+
884
+ if isinstance(constraint, _CVObjects):
885
+ return 5
886
+
887
+ if isinstance(constraint, Options): # includes StrOptions
888
+ for option in constraint.options:
889
+ return option
890
+
891
+ if isinstance(constraint, Interval):
892
+ interval = constraint
893
+ if interval.left is None and interval.right is None:
894
+ return 0
895
+ elif interval.left is None:
896
+ return interval.right - 1
897
+ elif interval.right is None:
898
+ return interval.left + 1
899
+ else:
900
+ if interval.type is Real:
901
+ return (interval.left + interval.right) / 2
902
+ else:
903
+ return interval.left + 1
904
+
905
+ raise ValueError(f"Unknown constraint type: {constraint}")
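A minimal sketch of how the constraint machinery above is meant to be used, assuming imports from this private module (it is internal to scikit-learn, not a stable public API); the `discretize` function is a made-up example.

    from numbers import Integral

    from sklearn.utils._param_validation import (
        Interval,
        InvalidParameterError,
        StrOptions,
        validate_params,
    )

    @validate_params(
        {
            "n_bins": [Interval(Integral, 1, None, closed="left")],
            "strategy": [StrOptions({"uniform", "quantile"})],
        },
        prefer_skip_nested_validation=True,
    )
    def discretize(n_bins, strategy="uniform"):
        return n_bins, strategy

    discretize(3)  # passes validation
    try:
        discretize(0, strategy="median")
    except InvalidParameterError as exc:
        # "The 'n_bins' parameter of discretize must be an int in the range [1, inf). Got 0 instead."
        print(exc)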
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_pprint.py ADDED
@@ -0,0 +1,463 @@
1
+ """This module contains the _EstimatorPrettyPrinter class used in
2
+ BaseEstimator.__repr__ for pretty-printing estimators"""
3
+
4
+ # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5
+ # 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation;
6
+ # All Rights Reserved
7
+
8
+ # Authors: Fred L. Drake, Jr. <[email protected]> (built-in CPython pprint module)
9
+ # Nicolas Hug (scikit-learn specific changes)
10
+
11
+ # License: PSF License version 2 (see below)
12
+
13
+ # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
14
+ # --------------------------------------------
15
+
16
+ # 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"),
17
+ # and the Individual or Organization ("Licensee") accessing and otherwise
18
+ # using this software ("Python") in source or binary form and its associated
19
+ # documentation.
20
+
21
+ # 2. Subject to the terms and conditions of this License Agreement, PSF hereby
22
+ # grants Licensee a nonexclusive, royalty-free, world-wide license to
23
+ # reproduce, analyze, test, perform and/or display publicly, prepare
24
+ # derivative works, distribute, and otherwise use Python alone or in any
25
+ # derivative version, provided, however, that PSF's License Agreement and
26
+ # PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004,
27
+ # 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016,
28
+ # 2017, 2018 Python Software Foundation; All Rights Reserved" are retained in
29
+ # Python alone or in any derivative version prepared by Licensee.
30
+
31
+ # 3. In the event Licensee prepares a derivative work that is based on or
32
+ # incorporates Python or any part thereof, and wants to make the derivative
33
+ # work available to others as provided herein, then Licensee hereby agrees to
34
+ # include in any such work a brief summary of the changes made to Python.
35
+
36
+ # 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES
37
+ # NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT
38
+ # NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF
39
+ # MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF
40
+ # PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
41
+
42
+ # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY
43
+ # INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
44
+ # MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE
45
+ # THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
46
+
47
+ # 6. This License Agreement will automatically terminate upon a material
48
+ # breach of its terms and conditions.
49
+
50
+ # 7. Nothing in this License Agreement shall be deemed to create any
51
+ # relationship of agency, partnership, or joint venture between PSF and
52
+ # Licensee. This License Agreement does not grant permission to use PSF
53
+ # trademarks or trade name in a trademark sense to endorse or promote products
54
+ # or services of Licensee, or any third party.
55
+
56
+ # 8. By copying, installing or otherwise using Python, Licensee agrees to be
57
+ # bound by the terms and conditions of this License Agreement.
58
+
59
+
60
+ # Brief summary of changes to original code:
61
+ # - "compact" parameter is supported for dicts, not just lists or tuples
62
+ # - estimators have a custom handler, they're not just treated as objects
63
+ # - long sequences (lists, tuples, dict items) with more than N elements are
64
+ # shortened using ellipsis (', ...') at the end.
65
+
66
+ import inspect
67
+ import pprint
68
+ from collections import OrderedDict
69
+
70
+ from .._config import get_config
71
+ from ..base import BaseEstimator
72
+ from . import is_scalar_nan
73
+
74
+
75
+ class KeyValTuple(tuple):
76
+ """Dummy class for correctly rendering key-value tuples from dicts."""
77
+
78
+ def __repr__(self):
79
+ # needed for _dispatch[tuple.__repr__] not to be overridden
80
+ return super().__repr__()
81
+
82
+
83
+ class KeyValTupleParam(KeyValTuple):
84
+ """Dummy class for correctly rendering key-value tuples from parameters."""
85
+
86
+ pass
87
+
88
+
89
+ def _changed_params(estimator):
90
+ """Return dict (param_name: value) of parameters that were given to
91
+ estimator with non-default values."""
92
+
93
+ params = estimator.get_params(deep=False)
94
+ init_func = getattr(estimator.__init__, "deprecated_original", estimator.__init__)
95
+ init_params = inspect.signature(init_func).parameters
96
+ init_params = {name: param.default for name, param in init_params.items()}
97
+
98
+ def has_changed(k, v):
99
+ if k not in init_params: # happens if k is part of a **kwargs
100
+ return True
101
+ if init_params[k] == inspect._empty: # k has no default value
102
+ return True
103
+ # try to avoid calling repr on nested estimators
104
+ if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
105
+ return True
106
+ # Use repr as a last resort. It may be expensive.
107
+ if repr(v) != repr(init_params[k]) and not (
108
+ is_scalar_nan(init_params[k]) and is_scalar_nan(v)
109
+ ):
110
+ return True
111
+ return False
112
+
113
+ return {k: v for k, v in params.items() if has_changed(k, v)}
114
+
115
+
116
+ class _EstimatorPrettyPrinter(pprint.PrettyPrinter):
117
+ """Pretty Printer class for estimator objects.
118
+
119
+ This extends the pprint.PrettyPrinter class, because:
120
+ - we need estimators to be printed with their parameters, e.g.
121
+ Estimator(param1=value1, ...) which is not supported by default.
122
+ - the 'compact' parameter of PrettyPrinter is ignored for dicts, which
123
+ may lead to very long representations that we want to avoid.
124
+
125
+ Quick overview of pprint.PrettyPrinter (see also
126
+ https://stackoverflow.com/questions/49565047/pprint-with-hex-numbers):
127
+
128
+ - the entry point is the _format() method which calls format() (overridden
129
+ here)
130
+ - format() directly calls _safe_repr() for a first try at rendering the
131
+ object
132
+ - _safe_repr formats the whole object recursively, only calling itself,
133
+ not caring about line length or anything
134
+ - back to _format(), if the output string is too long, _format() then calls
135
+ the appropriate _pprint_TYPE() method (e.g. _pprint_list()) depending on
136
+ the type of the object. This where the line length and the compact
137
+ parameters are taken into account.
138
+ - those _pprint_TYPE() methods will internally use the format() method for
139
+ rendering the nested objects of an object (e.g. the elements of a list)
140
+
141
+ In the end, everything has to be implemented twice: in _safe_repr and in
142
+ the custom _pprint_TYPE methods. Unfortunately PrettyPrinter is really not
143
+ straightforward to extend (especially when we want a compact output), so
144
+ the code is a bit convoluted.
145
+
146
+ This class overrides:
147
+ - format() to support the changed_only parameter
148
+ - _safe_repr to support printing of estimators (for when they fit on a
149
+ single line)
150
+ - _format_dict_items so that dict are correctly 'compacted'
151
+ - _format_items so that ellipsis is used on long lists and tuples
152
+
153
+ When estimators cannot be printed on a single line, the builtin _format()
154
+ will call _pprint_estimator() because it was registered to do so (see
155
+ _dispatch[BaseEstimator.__repr__] = _pprint_estimator).
156
+
157
+ Both _format_dict_items() and _pprint_estimator() use the
158
+ _format_params_or_dict_items() method that will format parameters and
159
+ key-value pairs respecting the compact parameter. This method needs another
160
+ subroutine _pprint_key_val_tuple() used when a parameter or a key-value
161
+ pair is too long to fit on a single line. This subroutine is called in
162
+ _format() and is registered as well in the _dispatch dict (just like
163
+ _pprint_estimator). We had to create the two classes KeyValTuple and
164
+ KeyValTupleParam for this.
165
+ """
166
+
167
+ def __init__(
168
+ self,
169
+ indent=1,
170
+ width=80,
171
+ depth=None,
172
+ stream=None,
173
+ *,
174
+ compact=False,
175
+ indent_at_name=True,
176
+ n_max_elements_to_show=None,
177
+ ):
178
+ super().__init__(indent, width, depth, stream, compact=compact)
179
+ self._indent_at_name = indent_at_name
180
+ if self._indent_at_name:
181
+ self._indent_per_level = 1 # ignore indent param
182
+ self._changed_only = get_config()["print_changed_only"]
183
+ # Max number of elements in a list, dict, tuple until we start using
184
+ # ellipsis. This also affects the number of arguments of an estimators
185
+ # (they are treated as dicts)
186
+ self.n_max_elements_to_show = n_max_elements_to_show
187
+
188
+ def format(self, object, context, maxlevels, level):
189
+ return _safe_repr(
190
+ object, context, maxlevels, level, changed_only=self._changed_only
191
+ )
192
+
193
+ def _pprint_estimator(self, object, stream, indent, allowance, context, level):
194
+ stream.write(object.__class__.__name__ + "(")
195
+ if self._indent_at_name:
196
+ indent += len(object.__class__.__name__)
197
+
198
+ if self._changed_only:
199
+ params = _changed_params(object)
200
+ else:
201
+ params = object.get_params(deep=False)
202
+
203
+ params = OrderedDict((name, val) for (name, val) in sorted(params.items()))
204
+
205
+ self._format_params(
206
+ params.items(), stream, indent, allowance + 1, context, level
207
+ )
208
+ stream.write(")")
209
+
210
+ def _format_dict_items(self, items, stream, indent, allowance, context, level):
211
+ return self._format_params_or_dict_items(
212
+ items, stream, indent, allowance, context, level, is_dict=True
213
+ )
214
+
215
+ def _format_params(self, items, stream, indent, allowance, context, level):
216
+ return self._format_params_or_dict_items(
217
+ items, stream, indent, allowance, context, level, is_dict=False
218
+ )
219
+
220
+ def _format_params_or_dict_items(
221
+ self, object, stream, indent, allowance, context, level, is_dict
222
+ ):
223
+ """Format dict items or parameters respecting the compact=True
224
+ parameter. For some reason, the builtin rendering of dict items doesn't
225
+ respect compact=True and will use one line per key-value if all cannot
226
+ fit in a single line.
227
+ Dict items will be rendered as <'key': value> while params will be
228
+ rendered as <key=value>. The implementation is mostly copy/pasting from
229
+ the builtin _format_items().
230
+ This also adds ellipsis if the number of items is greater than
231
+ self.n_max_elements_to_show.
232
+ """
233
+ write = stream.write
234
+ indent += self._indent_per_level
235
+ delimnl = ",\n" + " " * indent
236
+ delim = ""
237
+ width = max_width = self._width - indent + 1
238
+ it = iter(object)
239
+ try:
240
+ next_ent = next(it)
241
+ except StopIteration:
242
+ return
243
+ last = False
244
+ n_items = 0
245
+ while not last:
246
+ if n_items == self.n_max_elements_to_show:
247
+ write(", ...")
248
+ break
249
+ n_items += 1
250
+ ent = next_ent
251
+ try:
252
+ next_ent = next(it)
253
+ except StopIteration:
254
+ last = True
255
+ max_width -= allowance
256
+ width -= allowance
257
+ if self._compact:
258
+ k, v = ent
259
+ krepr = self._repr(k, context, level)
260
+ vrepr = self._repr(v, context, level)
261
+ if not is_dict:
262
+ krepr = krepr.strip("'")
263
+ middle = ": " if is_dict else "="
264
+ rep = krepr + middle + vrepr
265
+ w = len(rep) + 2
266
+ if width < w:
267
+ width = max_width
268
+ if delim:
269
+ delim = delimnl
270
+ if width >= w:
271
+ width -= w
272
+ write(delim)
273
+ delim = ", "
274
+ write(rep)
275
+ continue
276
+ write(delim)
277
+ delim = delimnl
278
+ class_ = KeyValTuple if is_dict else KeyValTupleParam
279
+ self._format(
280
+ class_(ent), stream, indent, allowance if last else 1, context, level
281
+ )
282
+
283
+ def _format_items(self, items, stream, indent, allowance, context, level):
284
+ """Format the items of an iterable (list, tuple...). Same as the
285
+ built-in _format_items, with support for ellipsis if the number of
286
+ elements is greater than self.n_max_elements_to_show.
287
+ """
288
+ write = stream.write
289
+ indent += self._indent_per_level
290
+ if self._indent_per_level > 1:
291
+ write((self._indent_per_level - 1) * " ")
292
+ delimnl = ",\n" + " " * indent
293
+ delim = ""
294
+ width = max_width = self._width - indent + 1
295
+ it = iter(items)
296
+ try:
297
+ next_ent = next(it)
298
+ except StopIteration:
299
+ return
300
+ last = False
301
+ n_items = 0
302
+ while not last:
303
+ if n_items == self.n_max_elements_to_show:
304
+ write(", ...")
305
+ break
306
+ n_items += 1
307
+ ent = next_ent
308
+ try:
309
+ next_ent = next(it)
310
+ except StopIteration:
311
+ last = True
312
+ max_width -= allowance
313
+ width -= allowance
314
+ if self._compact:
315
+ rep = self._repr(ent, context, level)
316
+ w = len(rep) + 2
317
+ if width < w:
318
+ width = max_width
319
+ if delim:
320
+ delim = delimnl
321
+ if width >= w:
322
+ width -= w
323
+ write(delim)
324
+ delim = ", "
325
+ write(rep)
326
+ continue
327
+ write(delim)
328
+ delim = delimnl
329
+ self._format(ent, stream, indent, allowance if last else 1, context, level)
330
+
331
+ def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level):
332
+ """Pretty printing for key-value tuples from dict or parameters."""
333
+ k, v = object
334
+ rep = self._repr(k, context, level)
335
+ if isinstance(object, KeyValTupleParam):
336
+ rep = rep.strip("'")
337
+ middle = "="
338
+ else:
339
+ middle = ": "
340
+ stream.write(rep)
341
+ stream.write(middle)
342
+ self._format(
343
+ v, stream, indent + len(rep) + len(middle), allowance, context, level
344
+ )
345
+
346
+ # Note: need to copy _dispatch to prevent instances of the builtin
347
+ # PrettyPrinter class to call methods of _EstimatorPrettyPrinter (see issue
348
+ # 12906)
349
+ # mypy error: "Type[PrettyPrinter]" has no attribute "_dispatch"
350
+ _dispatch = pprint.PrettyPrinter._dispatch.copy() # type: ignore
351
+ _dispatch[BaseEstimator.__repr__] = _pprint_estimator
352
+ _dispatch[KeyValTuple.__repr__] = _pprint_key_val_tuple
353
+
354
+
355
+ def _safe_repr(object, context, maxlevels, level, changed_only=False):
356
+ """Same as the builtin _safe_repr, with added support for Estimator
357
+ objects."""
358
+ typ = type(object)
359
+
360
+ if typ in pprint._builtin_scalars:
361
+ return repr(object), True, False
362
+
363
+ r = getattr(typ, "__repr__", None)
364
+ if issubclass(typ, dict) and r is dict.__repr__:
365
+ if not object:
366
+ return "{}", True, False
367
+ objid = id(object)
368
+ if maxlevels and level >= maxlevels:
369
+ return "{...}", False, objid in context
370
+ if objid in context:
371
+ return pprint._recursion(object), False, True
372
+ context[objid] = 1
373
+ readable = True
374
+ recursive = False
375
+ components = []
376
+ append = components.append
377
+ level += 1
378
+ saferepr = _safe_repr
379
+ items = sorted(object.items(), key=pprint._safe_tuple)
380
+ for k, v in items:
381
+ krepr, kreadable, krecur = saferepr(
382
+ k, context, maxlevels, level, changed_only=changed_only
383
+ )
384
+ vrepr, vreadable, vrecur = saferepr(
385
+ v, context, maxlevels, level, changed_only=changed_only
386
+ )
387
+ append("%s: %s" % (krepr, vrepr))
388
+ readable = readable and kreadable and vreadable
389
+ if krecur or vrecur:
390
+ recursive = True
391
+ del context[objid]
392
+ return "{%s}" % ", ".join(components), readable, recursive
393
+
394
+ if (issubclass(typ, list) and r is list.__repr__) or (
395
+ issubclass(typ, tuple) and r is tuple.__repr__
396
+ ):
397
+ if issubclass(typ, list):
398
+ if not object:
399
+ return "[]", True, False
400
+ format = "[%s]"
401
+ elif len(object) == 1:
402
+ format = "(%s,)"
403
+ else:
404
+ if not object:
405
+ return "()", True, False
406
+ format = "(%s)"
407
+ objid = id(object)
408
+ if maxlevels and level >= maxlevels:
409
+ return format % "...", False, objid in context
410
+ if objid in context:
411
+ return pprint._recursion(object), False, True
412
+ context[objid] = 1
413
+ readable = True
414
+ recursive = False
415
+ components = []
416
+ append = components.append
417
+ level += 1
418
+ for o in object:
419
+ orepr, oreadable, orecur = _safe_repr(
420
+ o, context, maxlevels, level, changed_only=changed_only
421
+ )
422
+ append(orepr)
423
+ if not oreadable:
424
+ readable = False
425
+ if orecur:
426
+ recursive = True
427
+ del context[objid]
428
+ return format % ", ".join(components), readable, recursive
429
+
430
+ if issubclass(typ, BaseEstimator):
431
+ objid = id(object)
432
+ if maxlevels and level >= maxlevels:
433
+ return "{...}", False, objid in context
434
+ if objid in context:
435
+ return pprint._recursion(object), False, True
436
+ context[objid] = 1
437
+ readable = True
438
+ recursive = False
439
+ if changed_only:
440
+ params = _changed_params(object)
441
+ else:
442
+ params = object.get_params(deep=False)
443
+ components = []
444
+ append = components.append
445
+ level += 1
446
+ saferepr = _safe_repr
447
+ items = sorted(params.items(), key=pprint._safe_tuple)
448
+ for k, v in items:
449
+ krepr, kreadable, krecur = saferepr(
450
+ k, context, maxlevels, level, changed_only=changed_only
451
+ )
452
+ vrepr, vreadable, vrecur = saferepr(
453
+ v, context, maxlevels, level, changed_only=changed_only
454
+ )
455
+ append("%s=%s" % (krepr.strip("'"), vrepr))
456
+ readable = readable and kreadable and vreadable
457
+ if krecur or vrecur:
458
+ recursive = True
459
+ del context[objid]
460
+ return ("%s(%s)" % (typ.__name__, ", ".join(components)), readable, recursive)
461
+
462
+ rep = repr(object)
463
+ return rep, (rep and not rep.startswith("<")), False
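The behaviour this printer implements is visible through the public `print_changed_only` option used by `_changed_params` above: only parameters that differ from their defaults are shown. A short sketch, assuming a standard scikit-learn install:

    from sklearn import set_config
    from sklearn.linear_model import LogisticRegression

    set_config(print_changed_only=True)   # the default in recent releases
    print(LogisticRegression(C=10.0))     # LogisticRegression(C=10.0)

    set_config(print_changed_only=False)
    print(LogisticRegression(C=10.0))     # every constructor parameter is printed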
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_response.py ADDED
@@ -0,0 +1,298 @@
1
+ """Utilities to get the response values of a classifier or a regressor.
2
+
3
+ It allows for uniform checks and validation.
4
+ """
5
+ import numpy as np
6
+
7
+ from ..base import is_classifier
8
+ from .multiclass import type_of_target
9
+ from .validation import _check_response_method, check_is_fitted
10
+
11
+
12
+ def _process_predict_proba(*, y_pred, target_type, classes, pos_label):
13
+ """Get the response values when the response method is `predict_proba`.
14
+
15
+ This function processes the `y_pred` array in the binary and multi-label cases.
16
+ In the binary case, it selects the column corresponding to the positive
17
+ class. In the multi-label case, it stacks the predictions if they are not
18
+ in the "compressed" format `(n_samples, n_outputs)`.
19
+
20
+ Parameters
21
+ ----------
22
+ y_pred : ndarray
23
+ Output of `estimator.predict_proba`. The shape depends on the target type:
24
+
25
+ - for binary classification, it is a 2d array of shape `(n_samples, 2)`;
26
+ - for multiclass classification, it is a 2d array of shape
27
+ `(n_samples, n_classes)`;
28
+ - for multilabel classification, it is either a list of 2d arrays of shape
29
+ `(n_samples, 2)` (e.g. `RandomForestClassifier` or `KNeighborsClassifier`) or
30
+ an array of shape `(n_samples, n_outputs)` (e.g. `MLPClassifier` or
31
+ `RidgeClassifier`).
32
+
33
+ target_type : {"binary", "multiclass", "multilabel-indicator"}
34
+ Type of the target.
35
+
36
+ classes : ndarray of shape (n_classes,) or list of such arrays
37
+ Class labels as reported by `estimator.classes_`.
38
+
39
+ pos_label : int, float, bool or str
40
+ Only used with binary and multiclass targets.
41
+
42
+ Returns
43
+ -------
44
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
45
+ (n_samples, n_output)
46
+ Compressed predictions format as requested by the metrics.
47
+ """
48
+ if target_type == "binary" and y_pred.shape[1] < 2:
49
+ # We don't handle classifiers trained on a single class.
50
+ raise ValueError(
51
+ f"Got predict_proba of shape {y_pred.shape}, but need "
52
+ "classifier with two classes."
53
+ )
54
+
55
+ if target_type == "binary":
56
+ col_idx = np.flatnonzero(classes == pos_label)[0]
57
+ return y_pred[:, col_idx]
58
+ elif target_type == "multilabel-indicator":
59
+ # Use a compress format of shape `(n_samples, n_output)`.
60
+ # Only `MLPClassifier` and `RidgeClassifier` return an array of shape
61
+ # `(n_samples, n_outputs)`.
62
+ if isinstance(y_pred, list):
63
+ # list of arrays of shape `(n_samples, 2)`
64
+ return np.vstack([p[:, -1] for p in y_pred]).T
65
+ else:
66
+ # array of shape `(n_samples, n_outputs)`
67
+ return y_pred
68
+
69
+ return y_pred
70
+
71
+
72
+ def _process_decision_function(*, y_pred, target_type, classes, pos_label):
73
+ """Get the response values when the response method is `decision_function`.
74
+
75
+ This function processes the `y_pred` array in the binary and multi-label cases.
76
+ In the binary case, it inverts the sign of the score if the positive label
77
+ is not `classes[1]`. In the multi-label case, it stacks the predictions if
78
+ they are not in the "compressed" format `(n_samples, n_outputs)`.
79
+
80
+ Parameters
81
+ ----------
82
+ y_pred : ndarray
83
+ Output of `estimator.decision_function`. The shape depends on the target type:
84
+
85
+ - for binary classification, it is a 1d array of shape `(n_samples,)` where the
86
+ sign is assuming that `classes[1]` is the positive class;
87
+ - for multiclass classification, it is a 2d array of shape
88
+ `(n_samples, n_classes)`;
89
+ - for multilabel classification, it is a 2d array of shape `(n_samples,
90
+ n_outputs)`.
91
+
92
+ target_type : {"binary", "multiclass", "multilabel-indicator"}
93
+ Type of the target.
94
+
95
+ classes : ndarray of shape (n_classes,) or list of such arrays
96
+ Class labels as reported by `estimator.classes_`.
97
+
98
+ pos_label : int, float, bool or str
99
+ Only used with binary and multiclass targets.
100
+
101
+ Returns
102
+ -------
103
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
104
+ (n_samples, n_output)
105
+ Compressed predictions format as requested by the metrics.
106
+ """
107
+ if target_type == "binary" and pos_label == classes[0]:
108
+ return -1 * y_pred
109
+ return y_pred
110
+
111
+
112
+ def _get_response_values(
113
+ estimator,
114
+ X,
115
+ response_method,
116
+ pos_label=None,
117
+ return_response_method_used=False,
118
+ ):
119
+ """Compute the response values of a classifier, an outlier detector, or a regressor.
120
+
121
+ The response values are predictions with the following shapes:
122
+
123
+ - for binary classification, it is a 1d array of shape `(n_samples,)`;
124
+ - for multiclass classification, it is a 2d array of shape `(n_samples, n_classes)`;
125
+ - for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
126
+ - for outlier detection, it is a 1d array of shape `(n_samples,)`;
127
+ - for regression, it is a 1d array of shape `(n_samples,)`.
128
+
129
+ If `estimator` is a binary classifier, also return the label for the
130
+ effective positive class.
131
+
132
+ This utility is used primarily in the displays and the scikit-learn scorers.
133
+
134
+ .. versionadded:: 1.3
135
+
136
+ Parameters
137
+ ----------
138
+ estimator : estimator instance
139
+ Fitted classifier, outlier detector, or regressor or a
140
+ fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
141
+ classifier, an outlier detector, or a regressor.
142
+
143
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
144
+ Input values.
145
+
146
+ response_method : {"predict_proba", "predict_log_proba", "decision_function", \
147
+ "predict"} or list of such str
148
+ Specifies the response method to use to get predictions from an estimator
149
+ (i.e. :term:`predict_proba`, :term:`predict_log_proba`,
150
+ :term:`decision_function` or :term:`predict`). Possible choices are:
151
+
152
+ - if `str`, it corresponds to the name of the method to return;
153
+ - if a list of `str`, it provides the method names in order of
154
+ preference. The method returned corresponds to the first method in
155
+ the list that is implemented by `estimator`.
156
+
157
+ pos_label : int, float, bool or str, default=None
158
+ The class considered as the positive class when computing
159
+ the metrics. If `None` and target is 'binary', `estimators.classes_[1]` is
160
+ considered as the positive class.
161
+
162
+ return_response_method_used : bool, default=False
163
+ Whether to return the response method used to compute the response
164
+ values.
165
+
166
+ .. versionadded:: 1.4
167
+
168
+ Returns
169
+ -------
170
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
171
+ (n_samples, n_outputs)
172
+ Target scores calculated from the provided `response_method`
173
+ and `pos_label`.
174
+
175
+ pos_label : int, float, bool, str or None
176
+ The class considered as the positive class when computing
177
+ the metrics. Returns `None` if `estimator` is a regressor or an outlier
178
+ detector.
179
+
180
+ response_method_used : str
181
+ The response method used to compute the response values. Only returned
182
+ if `return_response_method_used` is `True`.
183
+
184
+ .. versionadded:: 1.4
185
+
186
+ Raises
187
+ ------
188
+ ValueError
189
+ If `pos_label` is not a valid label.
190
+ If the shape of `y_pred` is not consistent for binary classifier.
191
+ If the response method can be applied to a classifier only and
192
+ `estimator` is a regressor.
193
+ """
194
+ from sklearn.base import is_classifier, is_outlier_detector # noqa
195
+
196
+ if is_classifier(estimator):
197
+ prediction_method = _check_response_method(estimator, response_method)
198
+ classes = estimator.classes_
199
+ target_type = type_of_target(classes)
200
+
201
+ if target_type in ("binary", "multiclass"):
202
+ if pos_label is not None and pos_label not in classes.tolist():
203
+ raise ValueError(
204
+ f"pos_label={pos_label} is not a valid label: It should be "
205
+ f"one of {classes}"
206
+ )
207
+ elif pos_label is None and target_type == "binary":
208
+ pos_label = classes[-1]
209
+
210
+ y_pred = prediction_method(X)
211
+
212
+ if prediction_method.__name__ in ("predict_proba", "predict_log_proba"):
213
+ y_pred = _process_predict_proba(
214
+ y_pred=y_pred,
215
+ target_type=target_type,
216
+ classes=classes,
217
+ pos_label=pos_label,
218
+ )
219
+ elif prediction_method.__name__ == "decision_function":
220
+ y_pred = _process_decision_function(
221
+ y_pred=y_pred,
222
+ target_type=target_type,
223
+ classes=classes,
224
+ pos_label=pos_label,
225
+ )
226
+ elif is_outlier_detector(estimator):
227
+ prediction_method = _check_response_method(estimator, response_method)
228
+ y_pred, pos_label = prediction_method(X), None
229
+ else: # estimator is a regressor
230
+ if response_method != "predict":
231
+ raise ValueError(
232
+ f"{estimator.__class__.__name__} should either be a classifier to be "
233
+ f"used with response_method={response_method} or the response_method "
234
+ "should be 'predict'. Got a regressor with response_method="
235
+ f"{response_method} instead."
236
+ )
237
+ prediction_method = estimator.predict
238
+ y_pred, pos_label = prediction_method(X), None
239
+
240
+ if return_response_method_used:
241
+ return y_pred, pos_label, prediction_method.__name__
242
+ return y_pred, pos_label
243
+
244
+
245
+ def _get_response_values_binary(estimator, X, response_method, pos_label=None):
246
+ """Compute the response values of a binary classifier.
247
+
248
+ Parameters
249
+ ----------
250
+ estimator : estimator instance
251
+ Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
252
+ in which the last estimator is a binary classifier.
253
+
254
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
255
+ Input values.
256
+
257
+ response_method : {'auto', 'predict_proba', 'decision_function'}
258
+ Specifies whether to use :term:`predict_proba` or
259
+ :term:`decision_function` as the target response. If set to 'auto',
260
+ :term:`predict_proba` is tried first and if it does not exist
261
+ :term:`decision_function` is tried next.
262
+
263
+ pos_label : int, float, bool or str, default=None
264
+ The class considered as the positive class when computing
265
+ the metrics. By default, `estimator.classes_[1]` is
266
+ considered as the positive class.
267
+
268
+ Returns
269
+ -------
270
+ y_pred : ndarray of shape (n_samples,)
271
+ Target scores calculated from the provided response_method
272
+ and pos_label.
273
+
274
+ pos_label : int, float, bool or str
275
+ The class considered as the positive class when computing
276
+ the metrics.
277
+ """
278
+ classification_error = "Expected 'estimator' to be a binary classifier."
279
+
280
+ check_is_fitted(estimator)
281
+ if not is_classifier(estimator):
282
+ raise ValueError(
283
+ classification_error + f" Got {estimator.__class__.__name__} instead."
284
+ )
285
+ elif len(estimator.classes_) != 2:
286
+ raise ValueError(
287
+ classification_error + f" Got {len(estimator.classes_)} classes instead."
288
+ )
289
+
290
+ if response_method == "auto":
291
+ response_method = ["predict_proba", "decision_function"]
292
+
293
+ return _get_response_values(
294
+ estimator,
295
+ X,
296
+ response_method,
297
+ pos_label=pos_label,
298
+ )
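
As a minimal usage sketch (the helper above is private API; the toy data and variable names below are illustrative only), the "auto" fallback described in the docstring can be exercised on a fitted binary classifier like this:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.utils._response import _get_response_values_binary

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(X, y)

# "auto" tries predict_proba first and falls back to decision_function.
y_score, pos_label = _get_response_values_binary(clf, X, response_method="auto")
# y_score holds the probability of the positive class (classes_[1] by default).
print(y_score.shape, pos_label)   # expected: (4,) 1
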
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.pxd ADDED
@@ -0,0 +1,104 @@
1
+ # WARNING: Do not edit this file directly.
2
+ # It is automatically generated from 'sklearn/utils/_seq_dataset.pxd.tp'.
3
+ # Changes must be made there.
4
+
5
+ """Dataset abstractions for sequential data access."""
6
+
7
+ cimport numpy as cnp
8
+
9
+ # SequentialDataset and its two concrete subclasses are (optionally randomized)
10
+ # iterators over the rows of a matrix X and corresponding target values y.
11
+
12
+ #------------------------------------------------------------------------------
13
+
14
+ cdef class SequentialDataset64:
15
+ cdef int current_index
16
+ cdef int[::1] index
17
+ cdef int *index_data_ptr
18
+ cdef Py_ssize_t n_samples
19
+ cdef cnp.uint32_t seed
20
+
21
+ cdef void shuffle(self, cnp.uint32_t seed) noexcept nogil
22
+ cdef int _get_next_index(self) noexcept nogil
23
+ cdef int _get_random_index(self) noexcept nogil
24
+
25
+ cdef void _sample(self, double **x_data_ptr, int **x_ind_ptr,
26
+ int *nnz, double *y, double *sample_weight,
27
+ int current_index) noexcept nogil
28
+ cdef void next(self, double **x_data_ptr, int **x_ind_ptr,
29
+ int *nnz, double *y, double *sample_weight) noexcept nogil
30
+ cdef int random(self, double **x_data_ptr, int **x_ind_ptr,
31
+ int *nnz, double *y, double *sample_weight) noexcept nogil
32
+
33
+
34
+ cdef class ArrayDataset64(SequentialDataset64):
35
+ cdef const double[:, ::1] X
36
+ cdef const double[::1] Y
37
+ cdef const double[::1] sample_weights
38
+ cdef Py_ssize_t n_features
39
+ cdef cnp.npy_intp X_stride
40
+ cdef double *X_data_ptr
41
+ cdef double *Y_data_ptr
42
+ cdef const int[::1] feature_indices
43
+ cdef int *feature_indices_ptr
44
+ cdef double *sample_weight_data
45
+
46
+
47
+ cdef class CSRDataset64(SequentialDataset64):
48
+ cdef const double[::1] X_data
49
+ cdef const int[::1] X_indptr
50
+ cdef const int[::1] X_indices
51
+ cdef const double[::1] Y
52
+ cdef const double[::1] sample_weights
53
+ cdef double *X_data_ptr
54
+ cdef int *X_indptr_ptr
55
+ cdef int *X_indices_ptr
56
+ cdef double *Y_data_ptr
57
+ cdef double *sample_weight_data
58
+
59
+ #------------------------------------------------------------------------------
60
+
61
+ cdef class SequentialDataset32:
62
+ cdef int current_index
63
+ cdef int[::1] index
64
+ cdef int *index_data_ptr
65
+ cdef Py_ssize_t n_samples
66
+ cdef cnp.uint32_t seed
67
+
68
+ cdef void shuffle(self, cnp.uint32_t seed) noexcept nogil
69
+ cdef int _get_next_index(self) noexcept nogil
70
+ cdef int _get_random_index(self) noexcept nogil
71
+
72
+ cdef void _sample(self, float **x_data_ptr, int **x_ind_ptr,
73
+ int *nnz, float *y, float *sample_weight,
74
+ int current_index) noexcept nogil
75
+ cdef void next(self, float **x_data_ptr, int **x_ind_ptr,
76
+ int *nnz, float *y, float *sample_weight) noexcept nogil
77
+ cdef int random(self, float **x_data_ptr, int **x_ind_ptr,
78
+ int *nnz, float *y, float *sample_weight) noexcept nogil
79
+
80
+
81
+ cdef class ArrayDataset32(SequentialDataset32):
82
+ cdef const float[:, ::1] X
83
+ cdef const float[::1] Y
84
+ cdef const float[::1] sample_weights
85
+ cdef Py_ssize_t n_features
86
+ cdef cnp.npy_intp X_stride
87
+ cdef float *X_data_ptr
88
+ cdef float *Y_data_ptr
89
+ cdef const int[::1] feature_indices
90
+ cdef int *feature_indices_ptr
91
+ cdef float *sample_weight_data
92
+
93
+
94
+ cdef class CSRDataset32(SequentialDataset32):
95
+ cdef const float[::1] X_data
96
+ cdef const int[::1] X_indptr
97
+ cdef const int[::1] X_indices
98
+ cdef const float[::1] Y
99
+ cdef const float[::1] sample_weights
100
+ cdef float *X_data_ptr
101
+ cdef int *X_indptr_ptr
102
+ cdef int *X_indices_ptr
103
+ cdef float *Y_data_ptr
104
+ cdef float *sample_weight_data
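
The declarations above are consumed by the Cython SGD/SAG solvers and are not meant to be used from Python. Purely to illustrate the access pattern they describe (sequential or randomized row iteration over X, y, and per-sample weights), here is a rough NumPy sketch; the class and method names are illustrative and do not mirror the actual Cython API:

import numpy as np

class PySequentialDataset:
    """Toy Python analogue of the SequentialDataset* access pattern."""

    def __init__(self, X, y, sample_weight, seed=0):
        self.X, self.y, self.sample_weight = X, y, sample_weight
        self.index = np.arange(X.shape[0])
        self.current = -1
        self.rng = np.random.RandomState(seed)

    def shuffle(self, seed):
        # Counterpart of SequentialDataset.shuffle: permute the row order.
        np.random.RandomState(seed).shuffle(self.index)

    def next(self):
        # Sequential access with wrap-around, like _get_next_index.
        self.current = (self.current + 1) % len(self.index)
        i = self.index[self.current]
        return self.X[i], self.y[i], self.sample_weight[i]

    def random(self):
        # Uniformly random access, like _get_random_index.
        i = self.index[self.rng.randint(len(self.index))]
        return self.X[i], self.y[i], self.sample_weight[i]

X = np.arange(12, dtype=np.float64).reshape(4, 3)
ds = PySequentialDataset(X, y=np.zeros(4), sample_weight=np.ones(4), seed=42)
ds.shuffle(seed=0)
print(ds.next())
print(ds.random())
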
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_sorting.pxd ADDED
@@ -0,0 +1,9 @@
1
+ from ._typedefs cimport intp_t
2
+
3
+ from cython cimport floating
4
+
5
+ cdef int simultaneous_sort(
6
+ floating *dist,
7
+ intp_t *idx,
8
+ intp_t size,
9
+ ) noexcept nogil
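
simultaneous_sort reorders a buffer of distances and its companion index buffer in lockstep, in place and without the GIL. The observable effect (not the in-place Cython implementation) can be sketched with NumPy:

import numpy as np

dist = np.array([0.9, 0.1, 0.5], dtype=np.float64)
idx = np.array([7, 3, 5], dtype=np.intp)

# Reorder both arrays by increasing distance, mirroring what simultaneous_sort does.
order = np.argsort(dist)
dist, idx = dist[order], idx[order]
print(dist)  # [0.1 0.5 0.9]
print(idx)   # [3 5 7]
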
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (176 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd ADDED
@@ -0,0 +1,12 @@
1
+ cimport numpy as cnp
2
+
3
+ from libcpp.vector cimport vector
4
+ from ..utils._typedefs cimport intp_t, float64_t, int32_t, int64_t
5
+
6
+ ctypedef fused vector_typed:
7
+ vector[float64_t]
8
+ vector[intp_t]
9
+ vector[int32_t]
10
+ vector[int64_t]
11
+
12
+ cdef cnp.ndarray vector_to_nd_array(vector_typed * vect_ptr)
llmeval-env/lib/python3.10/site-packages/sklearn/utils/discovery.py ADDED
@@ -0,0 +1,265 @@
1
+ """
2
+ The :mod:`sklearn.utils.discovery` module includes utilities to discover
3
+ objects (i.e. estimators, displays, functions) from the `sklearn` package.
4
+ """
5
+
6
+ import inspect
7
+ import pkgutil
8
+ from importlib import import_module
9
+ from operator import itemgetter
10
+ from pathlib import Path
11
+
12
+ _MODULE_TO_IGNORE = {
13
+ "tests",
14
+ "externals",
15
+ "setup",
16
+ "conftest",
17
+ "experimental",
18
+ "estimator_checks",
19
+ }
20
+
21
+
22
+ def all_estimators(type_filter=None):
23
+ """Get a list of all estimators from `sklearn`.
24
+
25
+ This function crawls the module and gets all classes that inherit
26
+ from BaseEstimator. Classes that are defined in test-modules are not
27
+ included.
28
+
29
+ Parameters
30
+ ----------
31
+ type_filter : {"classifier", "regressor", "cluster", "transformer"} \
32
+ or list of such str, default=None
33
+ Which kind of estimators should be returned. If None, no filter is
34
+ applied and all estimators are returned. Possible values are
35
+ 'classifier', 'regressor', 'cluster' and 'transformer' to get
36
+ estimators only of these specific types, or a list of these to
37
+ get the estimators that fit at least one of the types.
38
+
39
+ Returns
40
+ -------
41
+ estimators : list of tuples
42
+ List of (name, class), where ``name`` is the class name as string
43
+ and ``class`` is the actual type of the class.
44
+
45
+ Examples
46
+ --------
47
+ >>> from sklearn.utils.discovery import all_estimators
48
+ >>> estimators = all_estimators()
49
+ >>> type(estimators)
50
+ <class 'list'>
51
+ >>> type(estimators[0])
52
+ <class 'tuple'>
53
+ >>> estimators[:2]
54
+ [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
55
+ ('AdaBoostClassifier',
56
+ <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
57
+ >>> classifiers = all_estimators(type_filter="classifier")
58
+ >>> classifiers[:2]
59
+ [('AdaBoostClassifier',
60
+ <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
61
+ ('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
62
+ >>> regressors = all_estimators(type_filter="regressor")
63
+ >>> regressors[:2]
64
+ [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
65
+ ('AdaBoostRegressor',
66
+ <class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
67
+ >>> both = all_estimators(type_filter=["classifier", "regressor"])
68
+ >>> both[:2]
69
+ [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
70
+ ('AdaBoostClassifier',
71
+ <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
72
+ """
73
+ # lazy import to avoid circular imports from sklearn.base
74
+ from ..base import (
75
+ BaseEstimator,
76
+ ClassifierMixin,
77
+ ClusterMixin,
78
+ RegressorMixin,
79
+ TransformerMixin,
80
+ )
81
+ from . import IS_PYPY
82
+ from ._testing import ignore_warnings
83
+
84
+ def is_abstract(c):
85
+ if not (hasattr(c, "__abstractmethods__")):
86
+ return False
87
+ if not len(c.__abstractmethods__):
88
+ return False
89
+ return True
90
+
91
+ all_classes = []
92
+ root = str(Path(__file__).parent.parent) # sklearn package
93
+ # Ignore deprecation warnings triggered at import time and from walking
94
+ # packages
95
+ with ignore_warnings(category=FutureWarning):
96
+ for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
97
+ module_parts = module_name.split(".")
98
+ if (
99
+ any(part in _MODULE_TO_IGNORE for part in module_parts)
100
+ or "._" in module_name
101
+ ):
102
+ continue
103
+ module = import_module(module_name)
104
+ classes = inspect.getmembers(module, inspect.isclass)
105
+ classes = [
106
+ (name, est_cls) for name, est_cls in classes if not name.startswith("_")
107
+ ]
108
+
109
+ # TODO: Remove when FeatureHasher is implemented in PYPY
110
+ # Skips FeatureHasher for PYPY
111
+ if IS_PYPY and "feature_extraction" in module_name:
112
+ classes = [
113
+ (name, est_cls)
114
+ for name, est_cls in classes
115
+ if name == "FeatureHasher"
116
+ ]
117
+
118
+ all_classes.extend(classes)
119
+
120
+ all_classes = set(all_classes)
121
+
122
+ estimators = [
123
+ c
124
+ for c in all_classes
125
+ if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
126
+ ]
127
+ # get rid of abstract base classes
128
+ estimators = [c for c in estimators if not is_abstract(c[1])]
129
+
130
+ if type_filter is not None:
131
+ if not isinstance(type_filter, list):
132
+ type_filter = [type_filter]
133
+ else:
134
+ type_filter = list(type_filter) # copy
135
+ filtered_estimators = []
136
+ filters = {
137
+ "classifier": ClassifierMixin,
138
+ "regressor": RegressorMixin,
139
+ "transformer": TransformerMixin,
140
+ "cluster": ClusterMixin,
141
+ }
142
+ for name, mixin in filters.items():
143
+ if name in type_filter:
144
+ type_filter.remove(name)
145
+ filtered_estimators.extend(
146
+ [est for est in estimators if issubclass(est[1], mixin)]
147
+ )
148
+ estimators = filtered_estimators
149
+ if type_filter:
150
+ raise ValueError(
151
+ "Parameter type_filter must be 'classifier', "
152
+ "'regressor', 'transformer', 'cluster' or "
153
+ "None, got"
154
+ f" {repr(type_filter)}."
155
+ )
156
+
157
+ # drop duplicates, sort for reproducibility
158
+ # itemgetter is used to ensure the sort does not extend to the 2nd item of
159
+ # the tuple
160
+ return sorted(set(estimators), key=itemgetter(0))
161
+
162
+
163
+ def all_displays():
164
+ """Get a list of all displays from `sklearn`.
165
+
166
+ Returns
167
+ -------
168
+ displays : list of tuples
169
+ List of (name, class), where ``name`` is the display class name as
170
+ string and ``class`` is the actual type of the class.
171
+
172
+ Examples
173
+ --------
174
+ >>> from sklearn.utils.discovery import all_displays
175
+ >>> displays = all_displays()
176
+ >>> displays[0]
177
+ ('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
178
+ """
179
+ # lazy import to avoid circular imports from sklearn.base
180
+ from ._testing import ignore_warnings
181
+
182
+ all_classes = []
183
+ root = str(Path(__file__).parent.parent) # sklearn package
184
+ # Ignore deprecation warnings triggered at import time and from walking
185
+ # packages
186
+ with ignore_warnings(category=FutureWarning):
187
+ for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
188
+ module_parts = module_name.split(".")
189
+ if (
190
+ any(part in _MODULE_TO_IGNORE for part in module_parts)
191
+ or "._" in module_name
192
+ ):
193
+ continue
194
+ module = import_module(module_name)
195
+ classes = inspect.getmembers(module, inspect.isclass)
196
+ classes = [
197
+ (name, display_class)
198
+ for name, display_class in classes
199
+ if not name.startswith("_") and name.endswith("Display")
200
+ ]
201
+ all_classes.extend(classes)
202
+
203
+ return sorted(set(all_classes), key=itemgetter(0))
204
+
205
+
206
+ def _is_checked_function(item):
207
+ if not inspect.isfunction(item):
208
+ return False
209
+
210
+ if item.__name__.startswith("_"):
211
+ return False
212
+
213
+ mod = item.__module__
214
+ if not mod.startswith("sklearn.") or mod.endswith("estimator_checks"):
215
+ return False
216
+
217
+ return True
218
+
219
+
220
+ def all_functions():
221
+ """Get a list of all functions from `sklearn`.
222
+
223
+ Returns
224
+ -------
225
+ functions : list of tuples
226
+ List of (name, function), where ``name`` is the function name as
227
+ string and ``function`` is the actual function.
228
+
229
+ Examples
230
+ --------
231
+ >>> from sklearn.utils.discovery import all_functions
232
+ >>> functions = all_functions()
233
+ >>> name, function = functions[0]
234
+ >>> name
235
+ 'accuracy_score'
236
+ """
237
+ # lazy import to avoid circular imports from sklearn.base
238
+ from ._testing import ignore_warnings
239
+
240
+ all_functions = []
241
+ root = str(Path(__file__).parent.parent) # sklearn package
242
+ # Ignore deprecation warnings triggered at import time and from walking
243
+ # packages
244
+ with ignore_warnings(category=FutureWarning):
245
+ for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
246
+ module_parts = module_name.split(".")
247
+ if (
248
+ any(part in _MODULE_TO_IGNORE for part in module_parts)
249
+ or "._" in module_name
250
+ ):
251
+ continue
252
+
253
+ module = import_module(module_name)
254
+ functions = inspect.getmembers(module, _is_checked_function)
255
+ functions = [
256
+ (func.__name__, func)
257
+ for name, func in functions
258
+ if not name.startswith("_")
259
+ ]
260
+ all_functions.extend(functions)
261
+
262
+ # drop duplicates, sort for reproducibility
263
+ # itemgetter is used to ensure the sort does not extend to the 2nd item of
264
+ # the tuple
265
+ return sorted(set(all_functions), key=itemgetter(0))
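
Since all three discovery helpers return (name, object) tuples sorted by name, a common pattern is to turn the result into a dictionary for lookup by name; a small sketch (the exact contents depend on the installed scikit-learn version):

from sklearn.utils.discovery import all_estimators

# Map classifier names to their classes for quick lookup.
classifiers = dict(all_estimators(type_filter="classifier"))
print(len(classifiers))                   # number of classifiers discovered
print(classifiers["LogisticRegression"])  # the corresponding class object
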
llmeval-env/lib/python3.10/site-packages/sklearn/utils/graph.py ADDED
@@ -0,0 +1,166 @@
1
+ """
2
+ The :mod:`sklearn.utils.graph` module includes graph utilities and algorithms.
3
+ """
4
+
5
+ # Authors: Aric Hagberg <[email protected]>
6
+ # Gael Varoquaux <[email protected]>
7
+ # Jake Vanderplas <[email protected]>
8
+ # License: BSD 3 clause
9
+
10
+ import numpy as np
11
+ from scipy import sparse
12
+
13
+ from ..metrics.pairwise import pairwise_distances
14
+ from ._param_validation import Integral, Interval, validate_params
15
+
16
+
17
+ ###############################################################################
18
+ # Path and connected component analysis.
19
+ # Code adapted from networkx
20
+ @validate_params(
21
+ {
22
+ "graph": ["array-like", "sparse matrix"],
23
+ "source": [Interval(Integral, 0, None, closed="left")],
24
+ "cutoff": [Interval(Integral, 0, None, closed="left"), None],
25
+ },
26
+ prefer_skip_nested_validation=True,
27
+ )
28
+ def single_source_shortest_path_length(graph, source, *, cutoff=None):
29
+ """Return the length of the shortest path from source to all reachable nodes.
30
+
31
+ Parameters
32
+ ----------
33
+ graph : {array-like, sparse matrix} of shape (n_nodes, n_nodes)
34
+ Adjacency matrix of the graph. Sparse matrix of format LIL is
35
+ preferred.
36
+
37
+ source : int
38
+ Start node for path.
39
+
40
+ cutoff : int, default=None
41
+ Depth to stop the search - only paths of length <= cutoff are returned.
42
+
43
+ Returns
44
+ -------
45
+ paths : dict
46
+ Reachable end nodes mapped to length of path from source,
47
+ i.e. `{end: path_length}`.
48
+
49
+ Examples
50
+ --------
51
+ >>> from sklearn.utils.graph import single_source_shortest_path_length
52
+ >>> import numpy as np
53
+ >>> graph = np.array([[ 0, 1, 0, 0],
54
+ ... [ 1, 0, 1, 0],
55
+ ... [ 0, 1, 0, 0],
56
+ ... [ 0, 0, 0, 0]])
57
+ >>> single_source_shortest_path_length(graph, 0)
58
+ {0: 0, 1: 1, 2: 2}
59
+ >>> graph = np.ones((6, 6))
60
+ >>> sorted(single_source_shortest_path_length(graph, 2).items())
61
+ [(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
62
+ """
63
+ if sparse.issparse(graph):
64
+ graph = graph.tolil()
65
+ else:
66
+ graph = sparse.lil_matrix(graph)
67
+ seen = {} # level (number of hops) when seen in BFS
68
+ level = 0 # the current level
69
+ next_level = [source] # list of nodes to check at the next level
70
+ while next_level:
71
+ this_level = next_level # advance to next level
72
+ next_level = set() # and start a new fringe set for the next level
73
+ for v in this_level:
74
+ if v not in seen:
75
+ seen[v] = level # set the level of vertex v
76
+ next_level.update(graph.rows[v])
77
+ if cutoff is not None and cutoff <= level:
78
+ break
79
+ level += 1
80
+ return seen # return all path lengths as dictionary
81
+
82
+
83
+ def _fix_connected_components(
84
+ X,
85
+ graph,
86
+ n_connected_components,
87
+ component_labels,
88
+ mode="distance",
89
+ metric="euclidean",
90
+ **kwargs,
91
+ ):
92
+ """Add connections to sparse graph to connect unconnected components.
93
+
94
+ For each pair of unconnected components, compute all pairwise distances
95
+ from one component to the other, and add a connection on the closest pair
96
+ of samples. This is a hacky way to get a graph with a single connected
97
+ component, which is necessary for example to compute a shortest path
98
+ between all pairs of samples in the graph.
99
+
100
+ Parameters
101
+ ----------
102
+ X : array of shape (n_samples, n_features) or (n_samples, n_samples)
103
+ Features to compute the pairwise distances. If `metric =
104
+ "precomputed"`, X is the matrix of pairwise distances.
105
+
106
+ graph : sparse matrix of shape (n_samples, n_samples)
107
+ Graph of connection between samples.
108
+
109
+ n_connected_components : int
110
+ Number of connected components, as computed by
111
+ `scipy.sparse.csgraph.connected_components`.
112
+
113
+ component_labels : array of shape (n_samples)
114
+ Labels of connected components, as computed by
115
+ `scipy.sparse.csgraph.connected_components`.
116
+
117
+ mode : {'connectivity', 'distance'}, default='distance'
118
+ Type of graph matrix: 'connectivity' corresponds to the connectivity
119
+ matrix with ones and zeros, and 'distance' corresponds to the distances
120
+ between neighbors according to the given metric.
121
+
122
+ metric : str
123
+ Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
124
+
125
+ kwargs : kwargs
126
+ Keyword arguments passed to
127
+ `sklearn.metrics.pairwise.pairwise_distances`.
128
+
129
+ Returns
130
+ -------
131
+ graph : sparse matrix of shape (n_samples, n_samples)
132
+ Graph of connection between samples, with a single connected component.
133
+ """
134
+ if metric == "precomputed" and sparse.issparse(X):
135
+ raise RuntimeError(
136
+ "_fix_connected_components with metric='precomputed' requires the "
137
+ "full distance matrix in X, and does not work with a sparse "
138
+ "neighbors graph."
139
+ )
140
+
141
+ for i in range(n_connected_components):
142
+ idx_i = np.flatnonzero(component_labels == i)
143
+ Xi = X[idx_i]
144
+ for j in range(i):
145
+ idx_j = np.flatnonzero(component_labels == j)
146
+ Xj = X[idx_j]
147
+
148
+ if metric == "precomputed":
149
+ D = X[np.ix_(idx_i, idx_j)]
150
+ else:
151
+ D = pairwise_distances(Xi, Xj, metric=metric, **kwargs)
152
+
153
+ ii, jj = np.unravel_index(D.argmin(axis=None), D.shape)
154
+ if mode == "connectivity":
155
+ graph[idx_i[ii], idx_j[jj]] = 1
156
+ graph[idx_j[jj], idx_i[ii]] = 1
157
+ elif mode == "distance":
158
+ graph[idx_i[ii], idx_j[jj]] = D[ii, jj]
159
+ graph[idx_j[jj], idx_i[ii]] = D[ii, jj]
160
+ else:
161
+ raise ValueError(
162
+ "Unknown mode=%r, should be one of ['connectivity', 'distance']."
163
+ % mode
164
+ )
165
+
166
+ return graph
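
_fix_connected_components is private and normally called from estimators such as Isomap; purely as a sketch of the inputs it expects (a sparse neighbors graph plus the component labelling from SciPy), and assuming current scipy/scikit-learn APIs:

import numpy as np
from scipy.sparse.csgraph import connected_components
from sklearn.neighbors import kneighbors_graph
from sklearn.utils.graph import _fix_connected_components

# Two well-separated blobs: a 1-nearest-neighbor graph leaves them disconnected.
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(5, 2), 10 + rng.randn(5, 2)])
graph = kneighbors_graph(X, n_neighbors=1, mode="distance").tolil()

n_components, labels = connected_components(graph, directed=False)
graph = _fix_connected_components(X, graph, n_components, labels, mode="distance")
print(connected_components(graph.tocsr(), directed=False)[0])  # expected: 1
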
llmeval-env/lib/python3.10/site-packages/sklearn/utils/metadata_routing.py ADDED
@@ -0,0 +1,22 @@
1
+ """
2
+ The :mod:`sklearn.utils.metadata_routing` module includes utilities to route
3
+ metadata within scikit-learn estimators.
4
+ """
5
+
6
+ # This module is not a separate sub-folder since that would result in a circular
7
+ # import issue.
8
+ #
9
+ # Author: Adrin Jalali <[email protected]>
10
+ # License: BSD 3 clause
11
+
12
+ from ._metadata_requests import WARN, UNUSED, UNCHANGED # noqa
13
+ from ._metadata_requests import get_routing_for_object # noqa
14
+ from ._metadata_requests import MetadataRouter # noqa
15
+ from ._metadata_requests import MetadataRequest # noqa
16
+ from ._metadata_requests import MethodMapping # noqa
17
+ from ._metadata_requests import process_routing # noqa
18
+ from ._metadata_requests import _MetadataRequester # noqa
19
+ from ._metadata_requests import _routing_enabled # noqa
20
+ from ._metadata_requests import _raise_for_params # noqa
21
+ from ._metadata_requests import _RoutingNotSupportedMixin # noqa
22
+ from ._metadata_requests import _raise_for_unsupported_routing # noqa
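
These re-exports are the public entry points of the metadata routing machinery (still evolving as of scikit-learn 1.3/1.4). As a small sketch, get_routing_for_object can be used to inspect which metadata an estimator's methods request:

from sklearn.linear_model import LogisticRegression
from sklearn.utils.metadata_routing import get_routing_for_object

# A plain estimator exposes a MetadataRequest describing, per method
# (fit, score, ...), which metadata such as sample_weight it can accept.
routing = get_routing_for_object(LogisticRegression())
print(type(routing).__name__)   # expected: MetadataRequest
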
llmeval-env/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (254 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/optimize.py ADDED
@@ -0,0 +1,302 @@
1
+ """
2
+ Our own implementation of the Newton algorithm
3
+
4
+ Unlike the scipy.optimize version, this version of the Newton conjugate
5
+ gradient solver uses only one function call to retrieve the
6
+ func value, the gradient value and a callable for the Hessian matvec
7
+ product. If the function call is very expensive (e.g. for logistic
8
+ regression with large design matrix), this approach gives very
9
+ significant speedups.
10
+ """
11
+ # This is a modified file from scipy.optimize
12
+ # Original authors: Travis Oliphant, Eric Jones
13
+ # Modifications by Gael Varoquaux, Mathieu Blondel and Tom Dupre la Tour
14
+ # License: BSD
15
+
16
+ import warnings
17
+
18
+ import numpy as np
19
+ import scipy
20
+
21
+ from ..exceptions import ConvergenceWarning
22
+ from .fixes import line_search_wolfe1, line_search_wolfe2
23
+
24
+
25
+ class _LineSearchError(RuntimeError):
26
+ pass
27
+
28
+
29
+ def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs):
30
+ """
31
+ Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
32
+ suitable step length is not found, and raise an exception if a
33
+ suitable step length is not found.
34
+
35
+ Raises
36
+ ------
37
+ _LineSearchError
38
+ If no suitable step size is found.
39
+
40
+ """
41
+ ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)
42
+
43
+ if ret[0] is None:
44
+ # Have a look at the line_search method of our NewtonSolver class. We borrow
45
+ # the logic from there
46
+ # Deal with relative loss differences around machine precision.
47
+ args = kwargs.get("args", tuple())
48
+ fval = f(xk + pk, *args)
49
+ eps = 16 * np.finfo(np.asarray(old_fval).dtype).eps
50
+ tiny_loss = np.abs(old_fval * eps)
51
+ loss_improvement = fval - old_fval
52
+ check = np.abs(loss_improvement) <= tiny_loss
53
+ if check:
54
+ # 2.1 Check sum of absolute gradients as alternative condition.
55
+ sum_abs_grad_old = scipy.linalg.norm(gfk, ord=1)
56
+ grad = fprime(xk + pk, *args)
57
+ sum_abs_grad = scipy.linalg.norm(grad, ord=1)
58
+ check = sum_abs_grad < sum_abs_grad_old
59
+ if check:
60
+ ret = (
61
+ 1.0, # step size
62
+ ret[1] + 1, # number of function evaluations
63
+ ret[2] + 1, # number of gradient evaluations
64
+ fval,
65
+ old_fval,
66
+ grad,
67
+ )
68
+
69
+ if ret[0] is None:
70
+ # line search failed: try different one.
71
+ # TODO: It seems that the new check for the sum of absolute gradients above
72
+ # catches all cases that, earlier, ended up here. In fact, our tests never
73
+ # trigger this "if branch" here and we can consider to remove it.
74
+ ret = line_search_wolfe2(
75
+ f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs
76
+ )
77
+
78
+ if ret[0] is None:
79
+ raise _LineSearchError()
80
+
81
+ return ret
82
+
83
+
84
+ def _cg(fhess_p, fgrad, maxiter, tol):
85
+ """
86
+ Solve iteratively the linear system 'fhess_p . xsupi = fgrad'
87
+ with a conjugate gradient descent.
88
+
89
+ Parameters
90
+ ----------
91
+ fhess_p : callable
92
+ Function that takes the gradient as a parameter and returns the
93
+ matrix product of the Hessian and gradient.
94
+
95
+ fgrad : ndarray of shape (n_features,) or (n_features + 1,)
96
+ Gradient vector.
97
+
98
+ maxiter : int
99
+ Number of CG iterations.
100
+
101
+ tol : float
102
+ Stopping criterion.
103
+
104
+ Returns
105
+ -------
106
+ xsupi : ndarray of shape (n_features,) or (n_features + 1,)
107
+ Estimated solution.
108
+ """
109
+ xsupi = np.zeros(len(fgrad), dtype=fgrad.dtype)
110
+ ri = np.copy(fgrad)
111
+ psupi = -ri
112
+ i = 0
113
+ dri0 = np.dot(ri, ri)
114
+ # We also keep track of |p_i|^2.
115
+ psupi_norm2 = dri0
116
+
117
+ while i <= maxiter:
118
+ if np.sum(np.abs(ri)) <= tol:
119
+ break
120
+
121
+ Ap = fhess_p(psupi)
122
+ # check curvature
123
+ curv = np.dot(psupi, Ap)
124
+ if 0 <= curv <= 16 * np.finfo(np.float64).eps * psupi_norm2:
125
+ # See https://arxiv.org/abs/1803.02924, Algo 1 Capped Conjugate Gradient.
126
+ break
127
+ elif curv < 0:
128
+ if i > 0:
129
+ break
130
+ else:
131
+ # fall back to steepest descent direction
132
+ xsupi += dri0 / curv * psupi
133
+ break
134
+ alphai = dri0 / curv
135
+ xsupi += alphai * psupi
136
+ ri += alphai * Ap
137
+ dri1 = np.dot(ri, ri)
138
+ betai = dri1 / dri0
139
+ psupi = -ri + betai * psupi
140
+ # We use |p_i|^2 = |r_i|^2 + beta_i^2 |p_{i-1}|^2
141
+ psupi_norm2 = dri1 + betai**2 * psupi_norm2
142
+ i = i + 1
143
+ dri0 = dri1 # update np.dot(ri,ri) for next time.
144
+
145
+ return xsupi
146
+
147
+
148
+ def _newton_cg(
149
+ grad_hess,
150
+ func,
151
+ grad,
152
+ x0,
153
+ args=(),
154
+ tol=1e-4,
155
+ maxiter=100,
156
+ maxinner=200,
157
+ line_search=True,
158
+ warn=True,
159
+ ):
160
+ """
161
+ Minimization of scalar function of one or more variables using the
162
+ Newton-CG algorithm.
163
+
164
+ Parameters
165
+ ----------
166
+ grad_hess : callable
167
+ Should return the gradient and a callable returning the matvec product
168
+ of the Hessian.
169
+
170
+ func : callable
171
+ Should return the value of the function.
172
+
173
+ grad : callable
174
+ Should return the gradient of the function. This is used
175
+ by the line-search functions.
176
+
177
+ x0 : array of float
178
+ Initial guess.
179
+
180
+ args : tuple, default=()
181
+ Arguments passed to func_grad_hess, func and grad.
182
+
183
+ tol : float, default=1e-4
184
+ Stopping criterion. The iteration will stop when
185
+ ``max{|g_i | i = 1, ..., n} <= tol``
186
+ where ``g_i`` is the i-th component of the gradient.
187
+
188
+ maxiter : int, default=100
189
+ Number of Newton iterations.
190
+
191
+ maxinner : int, default=200
192
+ Number of CG iterations.
193
+
194
+ line_search : bool, default=True
195
+ Whether to use a line search or not.
196
+
197
+ warn : bool, default=True
198
+ Whether to warn when the solver did not converge.
199
+
200
+ Returns
201
+ -------
202
+ xk : ndarray of float
203
+ Estimated minimum.
204
+ """
205
+ x0 = np.asarray(x0).flatten()
206
+ xk = np.copy(x0)
207
+ k = 0
208
+
209
+ if line_search:
210
+ old_fval = func(x0, *args)
211
+ old_old_fval = None
212
+
213
+ # Outer loop: our Newton iteration
214
+ while k < maxiter:
215
+ # Compute a search direction pk by applying the CG method to
216
+ # del2 f(xk) p = - fgrad f(xk) starting from 0.
217
+ fgrad, fhess_p = grad_hess(xk, *args)
218
+
219
+ absgrad = np.abs(fgrad)
220
+ if np.max(absgrad) <= tol:
221
+ break
222
+
223
+ maggrad = np.sum(absgrad)
224
+ eta = min([0.5, np.sqrt(maggrad)])
225
+ termcond = eta * maggrad
226
+
227
+ # Inner loop: solve the Newton update by conjugate gradient, to
228
+ # avoid inverting the Hessian
229
+ xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond)
230
+
231
+ alphak = 1.0
232
+
233
+ if line_search:
234
+ try:
235
+ alphak, fc, gc, old_fval, old_old_fval, gfkp1 = _line_search_wolfe12(
236
+ func, grad, xk, xsupi, fgrad, old_fval, old_old_fval, args=args
237
+ )
238
+ except _LineSearchError:
239
+ warnings.warn("Line Search failed")
240
+ break
241
+
242
+ xk += alphak * xsupi # upcast if necessary
243
+ k += 1
244
+
245
+ if warn and k >= maxiter:
246
+ warnings.warn(
247
+ "newton-cg failed to converge. Increase the number of iterations.",
248
+ ConvergenceWarning,
249
+ )
250
+ return xk, k
251
+
252
+
253
+ def _check_optimize_result(solver, result, max_iter=None, extra_warning_msg=None):
254
+ """Check the OptimizeResult for successful convergence
255
+
256
+ Parameters
257
+ ----------
258
+ solver : str
259
+ Solver name. Currently only `lbfgs` is supported.
260
+
261
+ result : OptimizeResult
262
+ Result of the scipy.optimize.minimize function.
263
+
264
+ max_iter : int, default=None
265
+ Expected maximum number of iterations.
266
+
267
+ extra_warning_msg : str, default=None
268
+ Extra warning message.
269
+
270
+ Returns
271
+ -------
272
+ n_iter : int
273
+ Number of iterations.
274
+ """
275
+ # handle both scipy and scikit-learn solver names
276
+ if solver == "lbfgs":
277
+ if result.status != 0:
278
+ try:
279
+ # The message is already decoded in scipy>=1.6.0
280
+ result_message = result.message.decode("latin1")
281
+ except AttributeError:
282
+ result_message = result.message
283
+ warning_msg = (
284
+ "{} failed to converge (status={}):\n{}.\n\n"
285
+ "Increase the number of iterations (max_iter) "
286
+ "or scale the data as shown in:\n"
287
+ " https://scikit-learn.org/stable/modules/"
288
+ "preprocessing.html"
289
+ ).format(solver, result.status, result_message)
290
+ if extra_warning_msg is not None:
291
+ warning_msg += "\n" + extra_warning_msg
292
+ warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)
293
+ if max_iter is not None:
294
+ # In scipy <= 1.0.0, nit may exceed maxiter for lbfgs.
295
+ # See https://github.com/scipy/scipy/issues/7854
296
+ n_iter_i = min(result.nit, max_iter)
297
+ else:
298
+ n_iter_i = result.nit
299
+ else:
300
+ raise NotImplementedError
301
+
302
+ return n_iter_i
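
To make the calling convention of _newton_cg concrete, here is a minimal sketch that minimizes the strictly convex quadratic 0.5 * x.T @ A @ x - b @ x (the quadratic and the variable names are made up; the three callables follow the docstring above):

import numpy as np
from sklearn.utils.optimize import _newton_cg

A = np.array([[3.0, 1.0], [1.0, 2.0]])   # symmetric positive definite
b = np.array([1.0, 1.0])

func = lambda x: 0.5 * x @ A @ x - b @ x             # objective value
grad = lambda x: A @ x - b                            # gradient
grad_hess = lambda x: (A @ x - b, lambda p: A @ p)    # gradient + Hessian matvec

xk, n_iter = _newton_cg(grad_hess, func, grad, x0=np.zeros(2))
print(xk)                       # should be close to the exact solution below
print(np.linalg.solve(A, b))    # [0.2 0.4]
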
llmeval-env/lib/python3.10/site-packages/sklearn/utils/parallel.py ADDED
@@ -0,0 +1,129 @@
1
+ """
2
+ The :mod:`sklearn.utils.parallel` customizes `joblib` tools for scikit-learn usage.
3
+ """
4
+
5
+ import functools
6
+ import warnings
7
+ from functools import update_wrapper
8
+
9
+ import joblib
10
+
11
+ from .._config import config_context, get_config
12
+
13
+
14
+ def _with_config(delayed_func, config):
15
+ """Helper function that intends to attach a config to a delayed function."""
16
+ if hasattr(delayed_func, "with_config"):
17
+ return delayed_func.with_config(config)
18
+ else:
19
+ warnings.warn(
20
+ (
21
+ "`sklearn.utils.parallel.Parallel` needs to be used in "
22
+ "conjunction with `sklearn.utils.parallel.delayed` instead of "
23
+ "`joblib.delayed` to correctly propagate the scikit-learn "
24
+ "configuration to the joblib workers."
25
+ ),
26
+ UserWarning,
27
+ )
28
+ return delayed_func
29
+
30
+
31
+ class Parallel(joblib.Parallel):
32
+ """Tweak of :class:`joblib.Parallel` that propagates the scikit-learn configuration.
33
+
34
+ This subclass of :class:`joblib.Parallel` ensures that the active configuration
35
+ (thread-local) of scikit-learn is propagated to the parallel workers for the
36
+ duration of the execution of the parallel tasks.
37
+
38
+ The API does not change and you can refer to :class:`joblib.Parallel`
39
+ documentation for more details.
40
+
41
+ .. versionadded:: 1.3
42
+ """
43
+
44
+ def __call__(self, iterable):
45
+ """Dispatch the tasks and return the results.
46
+
47
+ Parameters
48
+ ----------
49
+ iterable : iterable
50
+ Iterable containing tuples of (delayed_function, args, kwargs) that should
51
+ be consumed.
52
+
53
+ Returns
54
+ -------
55
+ results : list
56
+ List of results of the tasks.
57
+ """
58
+ # Capture the thread-local scikit-learn configuration at the time
59
+ # Parallel.__call__ is issued since the tasks can be dispatched
60
+ # in a different thread depending on the backend and on the value of
61
+ # pre_dispatch and n_jobs.
62
+ config = get_config()
63
+ iterable_with_config = (
64
+ (_with_config(delayed_func, config), args, kwargs)
65
+ for delayed_func, args, kwargs in iterable
66
+ )
67
+ return super().__call__(iterable_with_config)
68
+
69
+
70
+ # remove when https://github.com/joblib/joblib/issues/1071 is fixed
71
+ def delayed(function):
72
+ """Decorator used to capture the arguments of a function.
73
+
74
+ This alternative to `joblib.delayed` is meant to be used in conjunction
75
+ with `sklearn.utils.parallel.Parallel`. The latter captures the scikit-
76
+ learn configuration by calling `sklearn.get_config()` in the current
77
+ thread, prior to dispatching the first task. The captured configuration is
78
+ then propagated and enabled for the duration of the execution of the
79
+ delayed function in the joblib workers.
80
+
81
+ .. versionchanged:: 1.3
82
+ `delayed` was moved from `sklearn.utils.fixes` to `sklearn.utils.parallel`
83
+ in scikit-learn 1.3.
84
+
85
+ Parameters
86
+ ----------
87
+ function : callable
88
+ The function to be delayed.
89
+
90
+ Returns
91
+ -------
92
+ output: tuple
93
+ Tuple containing the delayed function, the positional arguments, and the
94
+ keyword arguments.
95
+ """
96
+
97
+ @functools.wraps(function)
98
+ def delayed_function(*args, **kwargs):
99
+ return _FuncWrapper(function), args, kwargs
100
+
101
+ return delayed_function
102
+
103
+
104
+ class _FuncWrapper:
105
+ """Load the global configuration before calling the function."""
106
+
107
+ def __init__(self, function):
108
+ self.function = function
109
+ update_wrapper(self, self.function)
110
+
111
+ def with_config(self, config):
112
+ self.config = config
113
+ return self
114
+
115
+ def __call__(self, *args, **kwargs):
116
+ config = getattr(self, "config", None)
117
+ if config is None:
118
+ warnings.warn(
119
+ (
120
+ "`sklearn.utils.parallel.delayed` should be used with"
121
+ " `sklearn.utils.parallel.Parallel` to make it possible to"
122
+ " propagate the scikit-learn configuration of the current thread to"
123
+ " the joblib workers."
124
+ ),
125
+ UserWarning,
126
+ )
127
+ config = {}
128
+ with config_context(**config):
129
+ return self.function(*args, **kwargs)
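
A minimal sketch of why the pairing matters: the configuration active when Parallel.__call__ is issued (here assume_finite=True) is re-enabled inside each worker for the duration of the task, which plain joblib.delayed would not guarantee:

from sklearn import config_context, get_config
from sklearn.utils.parallel import Parallel, delayed

def report_assume_finite():
    # Runs in a worker; the captured configuration is re-activated around the call.
    return get_config()["assume_finite"]

with config_context(assume_finite=True):
    results = Parallel(n_jobs=2)(delayed(report_assume_finite)() for _ in range(4))

print(results)   # expected: [True, True, True, True]
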
llmeval-env/lib/python3.10/site-packages/sklearn/utils/stats.py ADDED
@@ -0,0 +1,69 @@
1
+ import numpy as np
2
+
3
+ from .extmath import stable_cumsum
4
+
5
+
6
+ def _weighted_percentile(array, sample_weight, percentile=50):
7
+ """Compute weighted percentile
8
+
9
+ Computes lower weighted percentile. If `array` is a 2D array, the
10
+ `percentile` is computed along the axis 0.
11
+
12
+ .. versionchanged:: 0.24
13
+ Accepts 2D `array`.
14
+
15
+ Parameters
16
+ ----------
17
+ array : 1D or 2D array
18
+ Values to take the weighted percentile of.
19
+
20
+ sample_weight : 1D or 2D array
21
+ Weights for each value in `array`. Must be same shape as `array` or
22
+ of shape `(array.shape[0],)`.
23
+
24
+ percentile : int or float, default=50
25
+ Percentile to compute. Must be value between 0 and 100.
26
+
27
+ Returns
28
+ -------
29
+ percentile : int if `array` 1D, ndarray if `array` 2D
30
+ Weighted percentile.
31
+ """
32
+ n_dim = array.ndim
33
+ if n_dim == 0:
34
+ return array[()]
35
+ if array.ndim == 1:
36
+ array = array.reshape((-1, 1))
37
+ # When sample_weight 1D, repeat for each array.shape[1]
38
+ if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
39
+ sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
40
+ sorted_idx = np.argsort(array, axis=0)
41
+ sorted_weights = np.take_along_axis(sample_weight, sorted_idx, axis=0)
42
+
43
+ # Compute the weighted cumulative sum to locate the requested percentile
44
+ weight_cdf = stable_cumsum(sorted_weights, axis=0)
45
+ adjusted_percentile = percentile / 100 * weight_cdf[-1]
46
+
47
+ # For percentile=0, ignore leading observations with sample_weight=0. GH20528
48
+ mask = adjusted_percentile == 0
49
+ adjusted_percentile[mask] = np.nextafter(
50
+ adjusted_percentile[mask], adjusted_percentile[mask] + 1
51
+ )
52
+
53
+ percentile_idx = np.array(
54
+ [
55
+ np.searchsorted(weight_cdf[:, i], adjusted_percentile[i])
56
+ for i in range(weight_cdf.shape[1])
57
+ ]
58
+ )
59
+ percentile_idx = np.array(percentile_idx)
60
+ # In rare cases, percentile_idx equals sorted_idx.shape[0]
61
+ max_idx = sorted_idx.shape[0] - 1
62
+ percentile_idx = np.apply_along_axis(
63
+ lambda x: np.clip(x, 0, max_idx), axis=0, arr=percentile_idx
64
+ )
65
+
66
+ col_index = np.arange(array.shape[1])
67
+ percentile_in_sorted = sorted_idx[percentile_idx, col_index]
68
+ percentile = array[percentile_in_sorted, col_index]
69
+ return percentile[0] if n_dim == 1 else percentile
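
For intuition about the "lower weighted percentile" convention, a tiny sketch (the numbers are chosen for illustration only):

import numpy as np
from sklearn.utils.stats import _weighted_percentile

values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

# With uniform weights the weighted median is the middle value.
print(_weighted_percentile(values, np.ones(5)))                             # 3.0
# Up-weighting the last observation pulls the weighted median towards it.
print(_weighted_percentile(values, np.array([1.0, 1.0, 1.0, 1.0, 10.0])))   # 5.0
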
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc ADDED
Binary file (664 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc ADDED
Binary file (1.33 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc ADDED
Binary file (8.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc ADDED
Binary file (6.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc ADDED
Binary file (25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc ADDED
Binary file (5.55 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_multiclass.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc ADDED
Binary file (2.56 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_param_validation.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_plotting.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc ADDED
Binary file (4.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc ADDED
Binary file (8.64 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_seq_dataset.cpython-310.pyc ADDED
Binary file (5.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc ADDED
Binary file (21.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_stats.cpython-310.pyc ADDED
Binary file (3.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc ADDED
Binary file (1.55 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_testing.cpython-310.pyc ADDED
Binary file (24.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_typedefs.cpython-310.pyc ADDED
Binary file (834 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_utils.cpython-310.pyc ADDED
Binary file (25.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_validation.cpython-310.pyc ADDED
Binary file (57.4 kB). View file