applied-ai-018 committed on
Commit
a803dcc
·
verified ·
1 Parent(s): f35a2c6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.cpython-310-x86_64-linux-gnu.so +0 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_encode.py +367 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py +496 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so +0 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so +0 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_joblib.py +38 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mocking.py +400 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_pprint.py +463 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.cpython-310-x86_64-linux-gnu.so +0 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_response.py +298 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_show_versions.py +112 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_testing.py +1169 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.cpython-310-x86_64-linux-gnu.so +0 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/utils/deprecation.py +116 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/utils/discovery.py +265 -0
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (34.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_arpack.cpython-310.pyc ADDED
Binary file (1.34 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_array_api.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_available_if.cpython-310.pyc ADDED
Binary file (3.17 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_bunch.cpython-310.pyc ADDED
Binary file (2.17 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_encode.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_estimator_html_repr.cpython-310.pyc ADDED
Binary file (15 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_joblib.cpython-310.pyc ADDED
Binary file (666 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mask.cpython-310.pyc ADDED
Binary file (1.66 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_metadata_requests.cpython-310.pyc ADDED
Binary file (47.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_mocking.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_param_validation.cpython-310.pyc ADDED
Binary file (28.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_plotting.cpython-310.pyc ADDED
Binary file (3.49 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_pprint.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_response.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_set_output.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_show_versions.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_tags.cpython-310.pyc ADDED
Binary file (2.01 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/_testing.cpython-310.pyc ADDED
Binary file (33.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/class_weight.cpython-310.pyc ADDED
Binary file (6.23 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/deprecation.cpython-310.pyc ADDED
Binary file (3.44 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/discovery.cpython-310.pyc ADDED
Binary file (7.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/estimator_checks.cpython-310.pyc ADDED
Binary file (104 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/extmath.cpython-310.pyc ADDED
Binary file (37.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/fixes.cpython-310.pyc ADDED
Binary file (9.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/graph.cpython-310.pyc ADDED
Binary file (4.86 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metadata_routing.cpython-310.pyc ADDED
Binary file (821 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/metaestimators.cpython-310.pyc ADDED
Binary file (5.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/multiclass.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/optimize.cpython-310.pyc ADDED
Binary file (6.48 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/parallel.cpython-310.pyc ADDED
Binary file (4.47 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/random.cpython-310.pyc ADDED
Binary file (2.68 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/sparsefuncs.cpython-310.pyc ADDED
Binary file (21.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/stats.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__pycache__/validation.cpython-310.pyc ADDED
Binary file (68.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (528 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_encode.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import Counter
2
+ from contextlib import suppress
3
+ from typing import NamedTuple
4
+
5
+ import numpy as np
6
+
7
+ from . import is_scalar_nan
8
+
9
+
10
def _unique(values, *, return_inverse=False, return_counts=False):
    """Find unique values, with support for python objects.

    Object-dtype arrays go through a pure-python code path; every other
    dtype is delegated to numpy.

    Parameters
    ----------
    values : ndarray
        Values to check for unknowns.

    return_inverse : bool, default=False
        If True, also return the indices of the unique values.

    return_counts : bool, default=False
        If True, also return the number of times each unique item appears in
        values.

    Returns
    -------
    unique : ndarray
        The sorted unique values.

    unique_inverse : ndarray
        The indices to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.

    unique_counts : ndarray
        The number of times each of the unique values comes up in the original
        array. Only provided if `return_counts` is True.
    """
    # Pick the implementation once, then forward the keyword options.
    finder = _unique_python if values.dtype == object else _unique_np
    return finder(values, return_inverse=return_inverse, return_counts=return_counts)
49
+
50
+
51
def _unique_np(values, return_inverse=False, return_counts=False):
    """Helper function to find unique values for numpy arrays that correctly
    accounts for nans. See `_unique` documentation for details."""
    uniques = np.unique(
        values, return_inverse=return_inverse, return_counts=return_counts
    )

    inverse, counts = None, None

    # np.unique returns a tuple when extra outputs were requested;
    # peel the optional outputs off the end (counts last, then inverse).
    if return_counts:
        *uniques, counts = uniques

    if return_inverse:
        *uniques, inverse = uniques

    if return_counts or return_inverse:
        # After unpacking, `uniques` is a one-element list; unwrap it.
        uniques = uniques[0]

    # np.unique will have duplicate missing values at the end of `uniques`
    # here we clip the nans and remove it from uniques
    if uniques.size and is_scalar_nan(uniques[-1]):
        # nan sorts last, so searchsorted finds the index of the first nan.
        nan_idx = np.searchsorted(uniques, np.nan)
        uniques = uniques[: nan_idx + 1]
        if return_inverse:
            # Re-point entries that referenced a duplicate nan at the
            # single nan that was kept.
            inverse[inverse > nan_idx] = nan_idx

        if return_counts:
            # Merge the counts of every duplicate nan into the kept slot.
            counts[nan_idx] = np.sum(counts[nan_idx:])
            counts = counts[: nan_idx + 1]

    ret = (uniques,)

    if return_inverse:
        ret += (inverse,)

    if return_counts:
        ret += (counts,)

    # Return a bare array when only uniques were requested, a tuple otherwise.
    return ret[0] if len(ret) == 1 else ret
90
+
91
+
92
class MissingValues(NamedTuple):
    """Immutable record of which missing-value markers are present."""

    # True when np.nan / float('nan') is among the missing values.
    nan: bool
    # True when None is among the missing values.
    none: bool

    def to_list(self):
        """Convert tuple to a list where None is always first."""
        candidates = ((None, self.none), (np.nan, self.nan))
        return [marker for marker, present in candidates if present]
106
+
107
+
108
def _extract_missing(values):
    """Split `values` into its regular members and its missing markers.

    Parameters
    ----------
    values: set
        Set of values to extract missing from.

    Returns
    -------
    output: set
        Set with missing values extracted.

    missing_values: MissingValues
        Object with missing value information.
    """
    missing = {item for item in values if item is None or is_scalar_nan(item)}

    if not missing:
        # Nothing to strip: hand back the original set untouched.
        return values, MissingValues(nan=False, none=False)

    has_none = None in missing
    # Any missing marker other than None has to be float('nan') or np.nan.
    has_nan = len(missing) > int(has_none)

    return values - missing, MissingValues(nan=has_nan, none=has_none)
144
+
145
+
146
class _nandict(dict):
    """Dictionary whose lookups also succeed for nan keys."""

    def __init__(self, mapping):
        super().__init__(mapping)
        # Cache the value of the first nan key: distinct nan objects do
        # not compare equal, so a plain dict lookup would miss them.
        for k, v in mapping.items():
            if is_scalar_nan(k):
                self.nan_value = v
                break

    def __missing__(self, key):
        if is_scalar_nan(key) and hasattr(self, "nan_value"):
            return self.nan_value
        raise KeyError(key)
160
+
161
+
162
def _map_to_integer(values, uniques):
    """Map each entry of `values` to its position within `uniques`."""
    lookup = _nandict({item: index for index, item in enumerate(uniques)})
    return np.array([lookup[item] for item in values])
166
+
167
+
168
def _unique_python(values, *, return_inverse, return_counts):
    # Only used by `_unique`; see the docstring there for the contract.
    try:
        distinct, missing = _extract_missing(set(values))
        # Sort the regular values, then append missing markers at the end.
        ordered = sorted(distinct)
        ordered.extend(missing.to_list())
        uniques = np.array(ordered, dtype=values.dtype)
    except TypeError:
        # Mixed types cannot be sorted/compared; report what was seen.
        type_names = sorted(t.__qualname__ for t in set(type(v) for v in values))
        raise TypeError(
            "Encoders require their input argument must be uniformly "
            f"strings or numbers. Got {type_names}"
        )

    result = (uniques,)
    if return_inverse:
        result += (_map_to_integer(values, uniques),)
    if return_counts:
        result += (_get_counts(values, uniques),)
    return result[0] if len(result) == 1 else result
192
+
193
+
194
def _encode(values, *, uniques, check_unknown=True):
    """Helper function to encode values into [0, n_uniques - 1].

    Object/string dtypes are encoded through a python dict lookup; numeric
    dtypes use `np.searchsorted`, which requires `uniques` to be sorted.
    That precondition is assumed, not checked — callers must guarantee it
    for all non-object values.

    Parameters
    ----------
    values : ndarray
        Values to encode.
    uniques : ndarray
        The unique values in `values`. If the dtype is not object, then
        `uniques` needs to be sorted.
    check_unknown : bool, default=True
        If True, check for values in `values` that are not in `unique`
        and raise an error. This is ignored for object dtype, and treated as
        True in this case. This parameter is useful for
        _BaseEncoder._transform() to avoid calling _check_unknown()
        twice.

    Returns
    -------
    encoded : ndarray
        Encoded values
    """
    if values.dtype.kind not in "OUS":
        # Numeric path: optionally verify everything is known, then
        # binary-search the sorted `uniques`.
        if check_unknown:
            diff = _check_unknown(values, uniques)
            if diff:
                raise ValueError(f"y contains previously unseen labels: {str(diff)}")
        return np.searchsorted(uniques, values)

    # Object/str path: dict lookup; an unknown key surfaces as KeyError.
    try:
        return _map_to_integer(values, uniques)
    except KeyError as e:
        raise ValueError(f"y contains previously unseen labels: {str(e)}")
234
+
235
+
236
def _check_unknown(values, known_values, return_mask=False):
    """
    Helper function to check for unknowns in values to be encoded.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : array
        Values to check for unknowns.
    known_values : array
        Known values. Must be unique.
    return_mask : bool, default=False
        If True, return a mask of the same shape as `values` indicating
        the valid values.

    Returns
    -------
    diff : list
        The unique values present in `values` and not in `known_values`.
    valid_mask : boolean array
        Additionally returned if ``return_mask=True``.

    """
    valid_mask = None

    if values.dtype.kind in "OUS":
        # Object/str path: compare python sets, with None/nan tracked
        # separately because nan does not compare equal to itself.
        values_set = set(values)
        values_set, missing_in_values = _extract_missing(values_set)

        uniques_set = set(known_values)
        uniques_set, missing_in_uniques = _extract_missing(uniques_set)
        diff = values_set - uniques_set

        # A missing marker is "unknown" only when present in `values`
        # but absent from `known_values`.
        nan_in_diff = missing_in_values.nan and not missing_in_uniques.nan
        none_in_diff = missing_in_values.none and not missing_in_uniques.none

        def is_valid(value):
            # Valid when in the known set, or when it is a missing marker
            # that the known set also contains.
            return (
                value in uniques_set
                or missing_in_uniques.none
                and value is None
                or missing_in_uniques.nan
                and is_scalar_nan(value)
            )

        if return_mask:
            if diff or nan_in_diff or none_in_diff:
                valid_mask = np.array([is_valid(value) for value in values])
            else:
                # Nothing unknown: all entries are valid.
                valid_mask = np.ones(len(values), dtype=bool)

        diff = list(diff)
        if none_in_diff:
            diff.append(None)
        if nan_in_diff:
            diff.append(np.nan)
    else:
        # Numeric path: rely on numpy's set routines.
        unique_values = np.unique(values)
        diff = np.setdiff1d(unique_values, known_values, assume_unique=True)
        if return_mask:
            if diff.size:
                valid_mask = np.isin(values, known_values)
            else:
                valid_mask = np.ones(len(values), dtype=bool)

        # check for nans in the known_values
        if np.isnan(known_values).any():
            diff_is_nan = np.isnan(diff)
            if diff_is_nan.any():
                # nan is known: np.isin above reported nan positions as
                # invalid, so flip them back to valid in the mask.
                if diff.size and return_mask:
                    is_nan = np.isnan(values)
                    valid_mask[is_nan] = 1

                # remove nan from diff
                diff = diff[~diff_is_nan]
        diff = list(diff)

    if return_mask:
        return diff, valid_mask
    return diff
319
+
320
+
321
class _NaNCounter(Counter):
    """Counter that also answers lookups for nan keys."""

    def __init__(self, items):
        super().__init__(self._generate_items(items))

    def _generate_items(self, items):
        """Yield the non-nan items; tally nans into ``self.nan_count``."""
        for item in items:
            if is_scalar_nan(item):
                # Track nans out-of-band: distinct nan objects would
                # otherwise land in separate Counter buckets.
                if not hasattr(self, "nan_count"):
                    self.nan_count = 0
                self.nan_count += 1
            else:
                yield item

    def __missing__(self, key):
        if is_scalar_nan(key) and hasattr(self, "nan_count"):
            return self.nan_count
        raise KeyError(key)
341
+
342
+
343
def _get_counts(values, uniques):
    """Get the count of each of the `uniques` in `values`.

    The counts will use the order passed in by `uniques`. For non-object dtypes,
    `uniques` is assumed to be sorted and `np.nan` is at the end.
    """
    if values.dtype.kind in "OU":
        # Object/str path: nan-aware Counter. Entries of `uniques` absent
        # from `values` keep their zero count (the KeyError is suppressed).
        counter = _NaNCounter(values)
        output = np.zeros(len(uniques), dtype=np.int64)
        for i, item in enumerate(uniques):
            with suppress(KeyError):
                output[i] = counter[item]
        return output

    unique_values, counts = _unique_np(values, return_counts=True)

    # Reorder unique_values based on input: `uniques`
    uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)
    # np.isin cannot match nan against nan, so patch the trailing slot when
    # both arrays end in nan (the documented convention for sorted input).
    # NOTE(review): assumes both arrays are non-empty here — confirm callers
    # guarantee this before indexing [-1].
    if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):
        uniques_in_values[-1] = True

    unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])
    output = np.zeros_like(uniques, dtype=np.int64)
    output[uniques_in_values] = counts[unique_valid_indices]
    return output
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.py ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import itertools
3
+ from contextlib import closing
4
+ from inspect import isclass
5
+ from io import StringIO
6
+ from pathlib import Path
7
+ from string import Template
8
+
9
+ from .. import __version__, config_context
10
+ from .fixes import parse_version
11
+
12
+
13
+ class _IDCounter:
14
+ """Generate sequential ids with a prefix."""
15
+
16
+ def __init__(self, prefix):
17
+ self.prefix = prefix
18
+ self.count = 0
19
+
20
+ def get_id(self):
21
+ self.count += 1
22
+ return f"{self.prefix}-{self.count}"
23
+
24
+
25
def _get_css_style():
    """Load the diagram CSS that sits next to this module on disk."""
    css_path = Path(__file__).with_suffix(".css")
    return css_path.read_text(encoding="utf-8")
27
+
28
+
29
# Module-level counters so every diagram rendered in a page gets unique
# HTML element ids (the CSS toggle controls rely on unique `id`/`for` pairs).
_CONTAINER_ID_COUNTER = _IDCounter("sk-container-id")
_ESTIMATOR_ID_COUNTER = _IDCounter("sk-estimator-id")
# CSS is read once at import time from the sibling ``.css`` file.
_CSS_STYLE = _get_css_style()
32
+
33
+
34
+ class _VisualBlock:
35
+ """HTML Representation of Estimator
36
+
37
+ Parameters
38
+ ----------
39
+ kind : {'serial', 'parallel', 'single'}
40
+ kind of HTML block
41
+
42
+ estimators : list of estimators or `_VisualBlock`s or a single estimator
43
+ If kind != 'single', then `estimators` is a list of
44
+ estimators.
45
+ If kind == 'single', then `estimators` is a single estimator.
46
+
47
+ names : list of str, default=None
48
+ If kind != 'single', then `names` corresponds to estimators.
49
+ If kind == 'single', then `names` is a single string corresponding to
50
+ the single estimator.
51
+
52
+ name_details : list of str, str, or None, default=None
53
+ If kind != 'single', then `name_details` corresponds to `names`.
54
+ If kind == 'single', then `name_details` is a single string
55
+ corresponding to the single estimator.
56
+
57
+ dash_wrapped : bool, default=True
58
+ If true, wrapped HTML element will be wrapped with a dashed border.
59
+ Only active when kind != 'single'.
60
+ """
61
+
62
+ def __init__(
63
+ self, kind, estimators, *, names=None, name_details=None, dash_wrapped=True
64
+ ):
65
+ self.kind = kind
66
+ self.estimators = estimators
67
+ self.dash_wrapped = dash_wrapped
68
+
69
+ if self.kind in ("parallel", "serial"):
70
+ if names is None:
71
+ names = (None,) * len(estimators)
72
+ if name_details is None:
73
+ name_details = (None,) * len(estimators)
74
+
75
+ self.names = names
76
+ self.name_details = name_details
77
+
78
+ def _sk_visual_block_(self):
79
+ return self
80
+
81
+
82
def _write_label_html(
    out,
    name,
    name_details,
    outer_class="sk-label-container",
    inner_class="sk-label",
    checked=False,
    doc_link="",
    is_fitted_css_class="",
    is_fitted_icon="",
):
    """Write labeled html with or without a dropdown with named details.

    Parameters
    ----------
    out : file-like object
        The file to write the HTML representation to.
    name : str
        The label for the estimator. It corresponds either to the estimator class name
        for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
        it corresponds to the name of the step.
    name_details : str
        The details to show as content in the dropdown part of the toggleable label. It
        can contain information such as non-default parameters or column information for
        `ColumnTransformer`.
    outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
        The CSS class for the outer container.
    inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
        The CSS class for the inner container.
    checked : bool, default=False
        Whether the dropdown is folded or not. With a single estimator, we intend to
        unfold the content.
    doc_link : str, default=""
        The link to the documentation for the estimator. If an empty string, no link is
        added to the diagram. This can be generated for an estimator if it uses the
        `_HTMLDocumentationLinkMixin`.
    is_fitted_css_class : {"", "fitted"}
        The CSS class to indicate whether or not the estimator is fitted. The
        empty string means that the estimator is not fitted and "fitted" means that the
        estimator is fitted.
    is_fitted_icon : str, default=""
        The HTML representation to show the fitted information in the diagram. An empty
        string means that no information is shown.
    """
    # we need to add some padding to the left of the label to be sure it is centered
    # NOTE(review): the rendering this was recovered from may have collapsed an
    # "&nbsp;" entity into a plain space below — confirm against upstream.
    padding_label = " " if is_fitted_icon else ""  # add padding for the "i" char

    out.write(
        f'<div class="{outer_class}"><div'
        f' class="{inner_class} {is_fitted_css_class} sk-toggleable">'
    )
    # Escape user-controlled text before embedding it in the markup.
    name = html.escape(name)

    if name_details is not None:
        name_details = html.escape(str(name_details))
        label_class = (
            f"sk-toggleable__label {is_fitted_css_class} sk-toggleable__label-arrow"
        )

        checked_str = "checked" if checked else ""
        # Unique id ties the hidden checkbox to its <label> toggle.
        est_id = _ESTIMATOR_ID_COUNTER.get_id()

        if doc_link:
            doc_label = "<span>Online documentation</span>"
            if name is not None:
                doc_label = f"<span>Documentation for {name}</span>"
            doc_link = (
                f'<a class="sk-estimator-doc-link {is_fitted_css_class}"'
                f' rel="noreferrer" target="_blank" href="{doc_link}">?{doc_label}</a>'
            )
            padding_label += "&nbsp;"  # add additional padding for the "?" char

        fmt_str = (
            '<input class="sk-toggleable__control sk-hidden--visually"'
            f' id="{est_id}" '
            f'type="checkbox" {checked_str}><label for="{est_id}" '
            f'class="{label_class} {is_fitted_css_class}">{padding_label}{name}'
            f"{doc_link}{is_fitted_icon}</label><div "
            f'class="sk-toggleable__content {is_fitted_css_class}">'
            f"<pre>{name_details}</pre></div> "
        )
        out.write(fmt_str)
    else:
        # No details: emit a plain, non-toggleable label.
        out.write(f"<label>{name}</label>")
    out.write("</div></div>")  # outer_class inner_class
167
+
168
+
169
def _get_visual_block(estimator):
    """Build the `_VisualBlock` describing how to display an estimator."""
    # An estimator may declare its own layout; fall back to a plain single
    # block if that hook raises.
    if hasattr(estimator, "_sk_visual_block_"):
        try:
            return estimator._sk_visual_block_()
        except Exception:
            return _VisualBlock(
                "single",
                estimator,
                names=estimator.__class__.__name__,
                name_details=str(estimator),
            )

    if isinstance(estimator, str):
        return _VisualBlock(
            "single", estimator, names=estimator, name_details=estimator
        )
    if estimator is None:
        return _VisualBlock("single", estimator, names="None", name_details="None")

    # check if estimator looks like a meta estimator (wraps estimators)
    if hasattr(estimator, "get_params") and not isclass(estimator):
        wrapped = [
            (key, est)
            for key, est in estimator.get_params(deep=False).items()
            if hasattr(est, "get_params") and hasattr(est, "fit") and not isclass(est)
        ]
        if wrapped:
            # Render the wrapped estimators side by side.
            return _VisualBlock(
                "parallel",
                [est for _, est in wrapped],
                names=[f"{key}: {est.__class__.__name__}" for key, est in wrapped],
                name_details=[str(est) for _, est in wrapped],
            )

    return _VisualBlock(
        "single",
        estimator,
        names=estimator.__class__.__name__,
        name_details=str(estimator),
    )
210
+
211
+
212
def _write_estimator_html(
    out,
    estimator,
    estimator_label,
    estimator_label_details,
    is_fitted_css_class,
    is_fitted_icon="",
    first_call=False,
):
    """Write estimator to html in serial, parallel, or by itself (single).

    For multiple estimators, this function is called recursively.

    Parameters
    ----------
    out : file-like object
        The file to write the HTML representation to.
    estimator : estimator object
        The estimator to visualize.
    estimator_label : str
        The label for the estimator. It corresponds either to the estimator class name
        for simple estimator or in the case of `Pipeline` and `ColumnTransformer`, it
        corresponds to the name of the step.
    estimator_label_details : str
        The details to show as content in the dropdown part of the toggleable label.
        It can contain information as non-default parameters or column information for
        `ColumnTransformer`.
    is_fitted_css_class : {"", "fitted"}
        The CSS class to indicate whether or not the estimator is fitted or not. The
        empty string means that the estimator is not fitted and "fitted" means that the
        estimator is fitted.
    is_fitted_icon : str, default=""
        The HTML representation to show the fitted information in the diagram. An empty
        string means that no information is shown. If the estimator to be shown is not
        the first estimator (i.e. `first_call=False`), `is_fitted_icon` is always an
        empty string.
    first_call : bool, default=False
        Whether this is the first time this function is called.
    """
    if first_call:
        est_block = _get_visual_block(estimator)
    else:
        # The fitted icon is only displayed on the outermost estimator; nested
        # estimators are rendered with only their non-default parameters shown.
        is_fitted_icon = ""
        with config_context(print_changed_only=True):
            est_block = _get_visual_block(estimator)
    # `estimator` can also be an instance of `_VisualBlock`
    if hasattr(estimator, "_get_doc_link"):
        doc_link = estimator._get_doc_link()
    else:
        doc_link = ""
    if est_block.kind in ("serial", "parallel"):
        # Composite estimators get a dashed wrapper box; the top-level call is
        # always wrapped regardless of the block's own preference.
        dashed_wrapped = first_call or est_block.dash_wrapped
        dash_cls = " sk-dashed-wrapped" if dashed_wrapped else ""
        out.write(f'<div class="sk-item{dash_cls}">')

        if estimator_label:
            _write_label_html(
                out,
                estimator_label,
                estimator_label_details,
                doc_link=doc_link,
                is_fitted_css_class=is_fitted_css_class,
                is_fitted_icon=is_fitted_icon,
            )

        kind = est_block.kind
        out.write(f'<div class="sk-{kind}">')
        est_infos = zip(est_block.estimators, est_block.names, est_block.name_details)

        # Recurse into each sub-estimator, writing HTML in document order.
        for est, name, name_details in est_infos:
            if kind == "serial":
                _write_estimator_html(
                    out,
                    est,
                    name,
                    name_details,
                    is_fitted_css_class=is_fitted_css_class,
                )
            else:  # parallel
                out.write('<div class="sk-parallel-item">')
                # wrap element in a serial visualblock
                serial_block = _VisualBlock("serial", [est], dash_wrapped=False)
                _write_estimator_html(
                    out,
                    serial_block,
                    name,
                    name_details,
                    is_fitted_css_class=is_fitted_css_class,
                )
                out.write("</div>")  # sk-parallel-item

        out.write("</div></div>")
    elif est_block.kind == "single":
        _write_label_html(
            out,
            est_block.names,
            est_block.name_details,
            outer_class="sk-item",
            inner_class="sk-estimator",
            checked=first_call,
            doc_link=doc_link,
            is_fitted_css_class=is_fitted_css_class,
            is_fitted_icon=is_fitted_icon,
        )
316
+
317
+
318
def estimator_html_repr(estimator):
    """Build a HTML representation of an estimator.

    Read more in the :ref:`User Guide <visualizing_composite_estimators>`.

    Parameters
    ----------
    estimator : estimator object
        The estimator to visualize.

    Returns
    -------
    html: str
        HTML representation of estimator.

    Examples
    --------
    >>> from sklearn.utils._estimator_html_repr import estimator_html_repr
    >>> from sklearn.linear_model import LogisticRegression
    >>> estimator_html_repr(LogisticRegression())
    '<style>...</div>'
    """
    # Local imports to avoid a circular import at module load time.
    from sklearn.exceptions import NotFittedError
    from sklearn.utils.validation import check_is_fitted

    # Determine the fitted status shown at the top of the diagram. Objects
    # without a `fit` method are displayed as "Not fitted".
    if not hasattr(estimator, "fit"):
        status_label = "<span>Not fitted</span>"
        is_fitted_css_class = ""
    else:
        try:
            check_is_fitted(estimator)
            status_label = "<span>Fitted</span>"
            is_fitted_css_class = "fitted"
        except NotFittedError:
            status_label = "<span>Not fitted</span>"
            is_fitted_css_class = ""

    # NOTE: the leading "i" is the info glyph rendered by the CSS tooltip.
    is_fitted_icon = (
        f'<span class="sk-estimator-doc-link {is_fitted_css_class}">'
        f"i{status_label}</span>"
    )
    with closing(StringIO()) as out:
        # Each rendered diagram gets a unique container id so that the CSS
        # rules of one output cell do not leak into another.
        container_id = _CONTAINER_ID_COUNTER.get_id()
        style_template = Template(_CSS_STYLE)
        style_with_id = style_template.substitute(id=container_id)
        estimator_str = str(estimator)

        # The fallback message is shown by default and loading the CSS sets
        # div.sk-text-repr-fallback to display: none to hide the fallback message.
        #
        # If the notebook is trusted, the CSS is loaded which hides the fallback
        # message. If the notebook is not trusted, then the CSS is not loaded and the
        # fallback message is shown by default.
        #
        # The reverse logic applies to HTML repr div.sk-container.
        # div.sk-container is hidden by default and the loading the CSS displays it.
        fallback_msg = (
            "In a Jupyter environment, please rerun this cell to show the HTML"
            " representation or trust the notebook. <br />On GitHub, the"
            " HTML representation is unable to render, please try loading this page"
            " with nbviewer.org."
        )
        html_template = (
            f"<style>{style_with_id}</style>"
            f'<div id="{container_id}" class="sk-top-container">'
            '<div class="sk-text-repr-fallback">'
            f"<pre>{html.escape(estimator_str)}</pre><b>{fallback_msg}</b>"
            "</div>"
            '<div class="sk-container" hidden>'
        )

        out.write(html_template)

        _write_estimator_html(
            out,
            estimator,
            estimator.__class__.__name__,
            estimator_str,
            first_call=True,
            is_fitted_css_class=is_fitted_css_class,
            is_fitted_icon=is_fitted_icon,
        )
        out.write("</div></div>")

        html_output = out.getvalue()
        return html_output
404
+
405
+
406
class _HTMLDocumentationLinkMixin:
    """Mixin class allowing to generate a link to the API documentation.

    This mixin relies on three attributes:
    - `_doc_link_module`: it corresponds to the root module (e.g. `sklearn`). Using this
      mixin, the default value is `sklearn`.
    - `_doc_link_template`: it corresponds to the template used to generate the
      link to the API documentation. Using this mixin, the default value is
      `"https://scikit-learn.org/{version_url}/modules/generated/
      {estimator_module}.{estimator_name}.html"`.
    - `_doc_link_url_param_generator`: it corresponds to a function that generates the
      parameters to be used in the template when the estimator module and name are not
      sufficient.

    The method :meth:`_get_doc_link` generates the link to the API documentation for a
    given estimator.

    This mixin provides all the necessary states for
    :func:`sklearn.utils.estimator_html_repr` to generate a link to the API
    documentation for the estimator HTML diagram.

    Examples
    --------
    If the default values for `_doc_link_module`, `_doc_link_template` are not suitable,
    then you can override them:
    >>> from sklearn.base import BaseEstimator
    >>> estimator = BaseEstimator()
    >>> estimator._doc_link_template = "https://website.com/{single_param}.html"
    >>> def url_param_generator(estimator):
    ...     return {"single_param": estimator.__class__.__name__}
    >>> estimator._doc_link_url_param_generator = url_param_generator
    >>> estimator._get_doc_link()
    'https://website.com/BaseEstimator.html'
    """

    _doc_link_module = "sklearn"
    _doc_link_url_param_generator = None

    @property
    def _doc_link_template(self):
        # Point to the versioned docs for releases and to /dev for dev builds.
        sklearn_version = parse_version(__version__)
        if sklearn_version.dev is None:
            version_url = f"{sklearn_version.major}.{sklearn_version.minor}"
        else:
            version_url = "dev"
        # The attribute name is passed as a literal string to getattr/setattr,
        # so Python's double-underscore name mangling does not apply and the
        # getter and setter below stay consistent with each other.
        return getattr(
            self,
            "__doc_link_template",
            (
                f"https://scikit-learn.org/{version_url}/modules/generated/"
                "{estimator_module}.{estimator_name}.html"
            ),
        )

    @_doc_link_template.setter
    def _doc_link_template(self, value):
        # Store the override on the instance; the property getter above falls
        # back to the default template when this attribute is absent.
        setattr(self, "__doc_link_template", value)

    def _get_doc_link(self):
        """Generates a link to the API documentation for a given estimator.

        This method generates the link to the estimator's documentation page
        by using the template defined by the attribute `_doc_link_template`.

        Returns
        -------
        url : str
            The URL to the API documentation for this estimator. If the estimator does
            not belong to module `_doc_link_module`, the empty string (i.e. `""`) is
            returned.
        """
        if self.__class__.__module__.split(".")[0] != self._doc_link_module:
            return ""

        if self._doc_link_url_param_generator is None:
            estimator_name = self.__class__.__name__
            # Construct the estimator's module name, up to the first private submodule.
            # This works because in scikit-learn all public estimators are exposed at
            # that level, even if they actually live in a private sub-module.
            estimator_module = ".".join(
                itertools.takewhile(
                    lambda part: not part.startswith("_"),
                    self.__class__.__module__.split("."),
                )
            )
            return self._doc_link_template.format(
                estimator_module=estimator_module, estimator_name=estimator_name
            )
        return self._doc_link_template.format(
            **self._doc_link_url_param_generator(self)
        )
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (288 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_isfinite.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (287 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_joblib.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Import joblib once with warnings silenced and re-export the subset of its
# public API that scikit-learn uses internally.
import warnings as _warnings

with _warnings.catch_warnings():
    _warnings.simplefilter("ignore")
    # joblib imports may raise DeprecationWarning on certain Python
    # versions
    import joblib
    from joblib import (
        Memory,
        Parallel,
        __version__,
        cpu_count,
        delayed,
        dump,
        effective_n_jobs,
        hash,
        load,
        logger,
        parallel_backend,
        register_parallel_backend,
    )


__all__ = [
    "parallel_backend",
    "register_parallel_backend",
    "cpu_count",
    "Parallel",
    "Memory",
    "delayed",
    "effective_n_jobs",
    "hash",
    "logger",
    "dump",
    "load",
    "joblib",
    "__version__",
]
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mocking.py ADDED
@@ -0,0 +1,400 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from ..base import BaseEstimator, ClassifierMixin
4
+ from ..utils._metadata_requests import RequestMethod
5
+ from .metaestimators import available_if
6
+ from .validation import _check_sample_weight, _num_samples, check_array, check_is_fitted
7
+
8
+
9
class ArraySlicingWrapper:
    """Minimal positional indexer emulating pandas' ``.iloc`` for MockDataFrame.

    Parameters
    ----------
    array
    """

    def __init__(self, array):
        self.array = array

    def __getitem__(self, aslice):
        # Like pandas' ``.iloc``, slicing returns a new (mock) data frame
        # wrapping the sliced underlying array.
        return MockDataFrame(self.array[aslice])
21
+
22
+
23
class MockDataFrame:
    """Minimal stand-in for a pandas DataFrame used in tests.

    Parameters
    ----------
    array
    """

    # have shape and length but don't support indexing.

    def __init__(self, array):
        self.array = array
        # pandas exposes the underlying ndarray as ``.values``.
        self.values = array
        self.shape = array.shape
        self.ndim = array.ndim
        # ugly hack to make iloc work.
        self.iloc = ArraySlicingWrapper(array)

    def __len__(self):
        return len(self.array)

    def __array__(self, dtype=None):
        # Pandas data frames also are array-like: we want to make sure that
        # input validation in cross-validation does not try to call that
        # method.
        return self.array

    def __eq__(self, other):
        # Element-wise comparison, wrapped like a real frame would be.
        return MockDataFrame(self.array == other.array)

    def __ne__(self, other):
        return not self == other

    def take(self, indices, axis=0):
        taken = self.array.take(indices, axis=axis)
        return MockDataFrame(taken)
57
+
58
+
59
class CheckingClassifier(ClassifierMixin, BaseEstimator):
    """Dummy classifier to test pipelining and meta-estimators.

    Checks some property of `X` and `y`in fit / predict.
    This allows testing whether pipelines / cross-validation or metaestimators
    changed the input.

    Can also be used to check if `fit_params` are passed correctly, and
    to force a certain score to be returned.

    Parameters
    ----------
    check_y, check_X : callable, default=None
        The callable used to validate `X` and `y`. These callable should return
        a bool where `False` will trigger an `AssertionError`. If `None`, the
        data is not validated. Default is `None`.

    check_y_params, check_X_params : dict, default=None
        The optional parameters to pass to `check_X` and `check_y`. If `None`,
        then no parameters are passed in.

    methods_to_check : "all" or list of str, default="all"
        The methods in which the checks should be applied. By default,
        all checks will be done on all methods (`fit`, `predict`,
        `predict_proba`, `decision_function` and `score`).

    foo_param : int, default=0
        A `foo` param. When `foo > 1`, the output of :meth:`score` will be 1
        otherwise it is 0.

    expected_sample_weight : bool, default=False
        Whether to check if a valid `sample_weight` was passed to `fit`.

    expected_fit_params : list of str, default=None
        A list of the expected parameters given when calling `fit`.

    Attributes
    ----------
    classes_ : int
        The classes seen during `fit`.

    n_features_in_ : int
        The number of features seen during `fit`.

    Examples
    --------
    >>> from sklearn.utils._mocking import CheckingClassifier

    This helper allow to assert to specificities regarding `X` or `y`. In this
    case we expect `check_X` or `check_y` to return a boolean.

    >>> from sklearn.datasets import load_iris
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = CheckingClassifier(check_X=lambda x: x.shape == (150, 4))
    >>> clf.fit(X, y)
    CheckingClassifier(...)

    We can also provide a check which might raise an error. In this case, we
    expect `check_X` to return `X` and `check_y` to return `y`.

    >>> from sklearn.utils import check_array
    >>> clf = CheckingClassifier(check_X=check_array)
    >>> clf.fit(X, y)
    CheckingClassifier(...)
    """

    def __init__(
        self,
        *,
        check_y=None,
        check_y_params=None,
        check_X=None,
        check_X_params=None,
        methods_to_check="all",
        foo_param=0,
        expected_sample_weight=None,
        expected_fit_params=None,
    ):
        # Parameters are stored verbatim, per scikit-learn convention: no
        # validation or transformation happens in __init__.
        self.check_y = check_y
        self.check_y_params = check_y_params
        self.check_X = check_X
        self.check_X_params = check_X_params
        self.methods_to_check = methods_to_check
        self.foo_param = foo_param
        self.expected_sample_weight = expected_sample_weight
        self.expected_fit_params = expected_fit_params

    def _check_X_y(self, X, y=None, should_be_fitted=True):
        """Validate X and y and make extra check.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data set.
            `X` is checked only if `check_X` is not `None` (default is None).
        y : array-like of shape (n_samples), default=None
            The corresponding target, by default `None`.
            `y` is checked only if `check_y` is not `None` (default is None).
        should_be_fitted : bool, default=True
            Whether or not the classifier should be already fitted.
            By default True.

        Returns
        -------
        X, y
        """
        if should_be_fitted:
            check_is_fitted(self)
        if self.check_X is not None:
            params = {} if self.check_X_params is None else self.check_X_params
            checked_X = self.check_X(X, **params)
            # A boolean result is asserted; any other result replaces X so
            # that validating checks (e.g. check_array) can transform it.
            if isinstance(checked_X, (bool, np.bool_)):
                assert checked_X
            else:
                X = checked_X
        if y is not None and self.check_y is not None:
            params = {} if self.check_y_params is None else self.check_y_params
            checked_y = self.check_y(y, **params)
            if isinstance(checked_y, (bool, np.bool_)):
                assert checked_y
            else:
                y = checked_y
        return X, y

    def fit(self, X, y, sample_weight=None, **fit_params):
        """Fit classifier.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples, n_outputs) or (n_samples,), \
                default=None
            Target relative to X for classification or regression;
            None for unsupervised learning.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.

        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of the estimator

        Returns
        -------
        self
        """
        # X and y must always be aligned, even when per-method checks are off.
        assert _num_samples(X) == _num_samples(y)
        if self.methods_to_check == "all" or "fit" in self.methods_to_check:
            X, y = self._check_X_y(X, y, should_be_fitted=False)
        self.n_features_in_ = np.shape(X)[1]
        self.classes_ = np.unique(check_array(y, ensure_2d=False, allow_nd=True))
        if self.expected_fit_params:
            missing = set(self.expected_fit_params) - set(fit_params)
            if missing:
                raise AssertionError(
                    f"Expected fit parameter(s) {list(missing)} not seen."
                )
            # Every fit_param routed by a meta-estimator must keep the same
            # number of samples as X (e.g. after cross-validation splitting).
            for key, value in fit_params.items():
                if _num_samples(value) != _num_samples(X):
                    raise AssertionError(
                        f"Fit parameter {key} has length {_num_samples(value)}"
                        f"; expected {_num_samples(X)}."
                    )
        if self.expected_sample_weight:
            if sample_weight is None:
                raise AssertionError("Expected sample_weight to be passed")
            _check_sample_weight(sample_weight, X)

        return self

    def predict(self, X):
        """Predict the first class seen in `classes_`.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        preds : ndarray of shape (n_samples,)
            Predictions of the first class seens in `classes_`.
        """
        if self.methods_to_check == "all" or "predict" in self.methods_to_check:
            X, y = self._check_X_y(X)
        # Always predict the first class, regardless of the input values.
        return self.classes_[np.zeros(_num_samples(X), dtype=int)]

    def predict_proba(self, X):
        """Predict probabilities for each class.

        Here, the dummy classifier will provide a probability of 1 for the
        first class of `classes_` and 0 otherwise.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        proba : ndarray of shape (n_samples, n_classes)
            The probabilities for each sample and class.
        """
        if self.methods_to_check == "all" or "predict_proba" in self.methods_to_check:
            X, y = self._check_X_y(X)
        proba = np.zeros((_num_samples(X), len(self.classes_)))
        proba[:, 0] = 1
        return proba

    def decision_function(self, X):
        """Confidence score.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        decision : ndarray of shape (n_samples,) if n_classes == 2\
                else (n_samples, n_classes)
            Confidence score.
        """
        if (
            self.methods_to_check == "all"
            or "decision_function" in self.methods_to_check
        ):
            X, y = self._check_X_y(X)
        if len(self.classes_) == 2:
            # for binary classifier, the confidence score is related to
            # classes_[1] and therefore should be null.
            return np.zeros(_num_samples(X))
        else:
            decision = np.zeros((_num_samples(X), len(self.classes_)))
            decision[:, 0] = 1
            return decision

    def score(self, X=None, Y=None):
        """Fake score.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Input data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Y : array-like of shape (n_samples, n_output) or (n_samples,)
            Target relative to X for classification or regression;
            None for unsupervised learning.

        Returns
        -------
        score : float
            Either 0 or 1 depending of `foo_param` (i.e. `foo_param > 1 =>
            score=1` otherwise `score=0`).
        """
        if self.methods_to_check == "all" or "score" in self.methods_to_check:
            self._check_X_y(X, Y)
        # The score is driven entirely by foo_param, not by the data.
        if self.foo_param > 1:
            score = 1.0
        else:
            score = 0.0
        return score

    def _more_tags(self):
        # Opt out of the common estimator checks; this mock violates several
        # of them on purpose.
        return {"_skip_test": True, "X_types": ["1dlabel"]}
327
+
328
+
329
# Deactivate key validation for CheckingClassifier because we want to be able to
# call fit with arbitrary fit_params and record them. Without this change, we
# would get an error because those arbitrary params are not expected.
CheckingClassifier.set_fit_request = RequestMethod(  # type: ignore
    name="fit", keys=[], validate_keys=False
)
335
+
336
+
337
class NoSampleWeightWrapper(BaseEstimator):
    """Wrap estimator which will not expose `sample_weight`.

    Parameters
    ----------
    est : estimator, default=None
        The estimator to wrap.
    """

    def __init__(self, est=None):
        self.est = est

    def fit(self, X, y):
        # Deliberately omits a ``sample_weight`` parameter so callers that
        # introspect the signature see an estimator without weight support.
        return self.est.fit(X, y)

    def predict(self, X):
        return self.est.predict(X)

    def predict_proba(self, X):
        return self.est.predict_proba(X)

    def _more_tags(self):
        # Testing helper: excluded from the common estimator checks.
        return {"_skip_test": True}
360
+
361
+
362
+ def _check_response(method):
363
+ def check(self):
364
+ return self.response_methods is not None and method in self.response_methods
365
+
366
+ return check
367
+
368
+
369
class _MockEstimatorOnOffPrediction(BaseEstimator):
    """Estimator for which we can turn on/off the prediction methods.

    Parameters
    ----------
    response_methods: list of \
            {"predict", "predict_proba", "decision_function"}, default=None
        List containing the response implemented by the estimator. When, the
        response is in the list, it will return the name of the response method
        when called. Otherwise, an `AttributeError` is raised. It allows to
        use `getattr` as any conventional estimator. By default, no response
        methods are mocked.
    """

    def __init__(self, response_methods=None):
        self.response_methods = response_methods

    def fit(self, X, y):
        # Only record the classes; no actual model is trained.
        self.classes_ = np.unique(y)
        return self

    # Each response method is gated by `available_if`: accessing it raises
    # AttributeError unless it was listed in `response_methods`, and when
    # available it simply returns its own name.
    @available_if(_check_response("predict"))
    def predict(self, X):
        return "predict"

    @available_if(_check_response("predict_proba"))
    def predict_proba(self, X):
        return "predict_proba"

    @available_if(_check_response("decision_function"))
    def decision_function(self, X):
        return "decision_function"
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_pprint.py ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This module contains the _EstimatorPrettyPrinter class used in
2
+ BaseEstimator.__repr__ for pretty-printing estimators"""
3
+
4
+ # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5
+ # 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation;
6
+ # All Rights Reserved
7
+
8
+ # Authors: Fred L. Drake, Jr. <[email protected]> (built-in CPython pprint module)
9
+ # Nicolas Hug (scikit-learn specific changes)
10
+
11
+ # License: PSF License version 2 (see below)
12
+
13
+ # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
14
+ # --------------------------------------------
15
+
16
+ # 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"),
17
+ # and the Individual or Organization ("Licensee") accessing and otherwise
18
+ # using this software ("Python") in source or binary form and its associated
19
+ # documentation.
20
+
21
+ # 2. Subject to the terms and conditions of this License Agreement, PSF hereby
22
+ # grants Licensee a nonexclusive, royalty-free, world-wide license to
23
+ # reproduce, analyze, test, perform and/or display publicly, prepare
24
+ # derivative works, distribute, and otherwise use Python alone or in any
25
+ # derivative version, provided, however, that PSF's License Agreement and
26
+ # PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004,
27
+ # 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016,
28
+ # 2017, 2018 Python Software Foundation; All Rights Reserved" are retained in
29
+ # Python alone or in any derivative version prepared by Licensee.
30
+
31
+ # 3. In the event Licensee prepares a derivative work that is based on or
32
+ # incorporates Python or any part thereof, and wants to make the derivative
33
+ # work available to others as provided herein, then Licensee hereby agrees to
34
+ # include in any such work a brief summary of the changes made to Python.
35
+
36
+ # 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES
37
+ # NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT
38
+ # NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF
39
+ # MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF
40
+ # PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
41
+
42
+ # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY
43
+ # INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
44
+ # MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE
45
+ # THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
46
+
47
+ # 6. This License Agreement will automatically terminate upon a material
48
+ # breach of its terms and conditions.
49
+
50
+ # 7. Nothing in this License Agreement shall be deemed to create any
51
+ # relationship of agency, partnership, or joint venture between PSF and
52
+ # Licensee. This License Agreement does not grant permission to use PSF
53
+ # trademarks or trade name in a trademark sense to endorse or promote products
54
+ # or services of Licensee, or any third party.
55
+
56
+ # 8. By copying, installing or otherwise using Python, Licensee agrees to be
57
+ # bound by the terms and conditions of this License Agreement.
58
+
59
+
60
+ # Brief summary of changes to original code:
61
+ # - "compact" parameter is supported for dicts, not just lists or tuples
62
+ # - estimators have a custom handler, they're not just treated as objects
63
+ # - long sequences (lists, tuples, dict items) with more than N elements are
64
+ # shortened using ellipsis (', ...') at the end.
65
+
66
+ import inspect
67
+ import pprint
68
+ from collections import OrderedDict
69
+
70
+ from .._config import get_config
71
+ from ..base import BaseEstimator
72
+ from . import is_scalar_nan
73
+
74
+
75
class KeyValTuple(tuple):
    """Dummy class for correctly rendering key-value tuples from dicts."""

    def __repr__(self):
        # Delegate straight to the tuple representation; the subclass exists
        # so the pretty-printer can dispatch on this type without changing
        # how the tuple itself renders.
        return tuple.__repr__(self)
81
+
82
+
83
class KeyValTupleParam(KeyValTuple):
    """Dummy class for correctly rendering key-value tuples from parameters."""

    # Behaviorally identical to KeyValTuple; the distinct type lets the
    # pretty-printer render estimator parameters differently from dict items.
    pass
87
+
88
+
89
def _changed_params(estimator):
    """Return dict (param_name: value) of parameters that were given to
    estimator with non-default values."""

    params = estimator.get_params(deep=False)
    init_func = getattr(estimator.__init__, "deprecated_original", estimator.__init__)
    # Map each constructor parameter to its declared default value.
    defaults = {
        name: parameter.default
        for name, parameter in inspect.signature(init_func).parameters.items()
    }

    def has_changed(name, value):
        if name not in defaults:  # happens if name is part of a **kwargs
            return True
        default = defaults[name]
        if default == inspect._empty:  # parameter has no default value
            return True
        # try to avoid calling repr on nested estimators
        if isinstance(value, BaseEstimator) and value.__class__ != default.__class__:
            return True
        # NaN never compares equal, so treat a NaN default matched by a NaN
        # value as unchanged.
        if is_scalar_nan(default) and is_scalar_nan(value):
            return False
        # Use repr as a last resort. It may be expensive.
        return repr(value) != repr(default)

    return {name: value for name, value in params.items() if has_changed(name, value)}
114
+
115
+
116
+ class _EstimatorPrettyPrinter(pprint.PrettyPrinter):
117
+ """Pretty Printer class for estimator objects.
118
+
119
+ This extends the pprint.PrettyPrinter class, because:
120
+ - we need estimators to be printed with their parameters, e.g.
121
+ Estimator(param1=value1, ...) which is not supported by default.
122
+ - the 'compact' parameter of PrettyPrinter is ignored for dicts, which
123
+ may lead to very long representations that we want to avoid.
124
+
125
+ Quick overview of pprint.PrettyPrinter (see also
126
+ https://stackoverflow.com/questions/49565047/pprint-with-hex-numbers):
127
+
128
+ - the entry point is the _format() method which calls format() (overridden
129
+ here)
130
+ - format() directly calls _safe_repr() for a first try at rendering the
131
+ object
132
+ - _safe_repr formats the whole object recursively, only calling itself,
133
+ not caring about line length or anything
134
+ - back to _format(), if the output string is too long, _format() then calls
135
+ the appropriate _pprint_TYPE() method (e.g. _pprint_list()) depending on
136
+ the type of the object. This where the line length and the compact
137
+ parameters are taken into account.
138
+ - those _pprint_TYPE() methods will internally use the format() method for
139
+ rendering the nested objects of an object (e.g. the elements of a list)
140
+
141
+ In the end, everything has to be implemented twice: in _safe_repr and in
142
+ the custom _pprint_TYPE methods. Unfortunately PrettyPrinter is really not
143
+ straightforward to extend (especially when we want a compact output), so
144
+ the code is a bit convoluted.
145
+
146
+ This class overrides:
147
+ - format() to support the changed_only parameter
148
+ - _safe_repr to support printing of estimators (for when they fit on a
149
+ single line)
150
+ - _format_dict_items so that dict are correctly 'compacted'
151
+ - _format_items so that ellipsis is used on long lists and tuples
152
+
153
+ When estimators cannot be printed on a single line, the builtin _format()
154
+ will call _pprint_estimator() because it was registered to do so (see
155
+ _dispatch[BaseEstimator.__repr__] = _pprint_estimator).
156
+
157
+ both _format_dict_items() and _pprint_estimator() use the
158
+ _format_params_or_dict_items() method that will format parameters and
159
+ key-value pairs respecting the compact parameter. This method needs another
160
+ subroutine _pprint_key_val_tuple() used when a parameter or a key-value
161
+ pair is too long to fit on a single line. This subroutine is called in
162
+ _format() and is registered as well in the _dispatch dict (just like
163
+ _pprint_estimator). We had to create the two classes KeyValTuple and
164
+ KeyValTupleParam for this.
165
+ """
166
+
167
+ def __init__(
168
+ self,
169
+ indent=1,
170
+ width=80,
171
+ depth=None,
172
+ stream=None,
173
+ *,
174
+ compact=False,
175
+ indent_at_name=True,
176
+ n_max_elements_to_show=None,
177
+ ):
178
+ super().__init__(indent, width, depth, stream, compact=compact)
179
+ self._indent_at_name = indent_at_name
180
+ if self._indent_at_name:
181
+ self._indent_per_level = 1 # ignore indent param
182
+ self._changed_only = get_config()["print_changed_only"]
183
+ # Max number of elements in a list, dict, tuple until we start using
184
+ # ellipsis. This also affects the number of arguments of an estimators
185
+ # (they are treated as dicts)
186
+ self.n_max_elements_to_show = n_max_elements_to_show
187
+
188
+ def format(self, object, context, maxlevels, level):
189
+ return _safe_repr(
190
+ object, context, maxlevels, level, changed_only=self._changed_only
191
+ )
192
+
193
+ def _pprint_estimator(self, object, stream, indent, allowance, context, level):
194
+ stream.write(object.__class__.__name__ + "(")
195
+ if self._indent_at_name:
196
+ indent += len(object.__class__.__name__)
197
+
198
+ if self._changed_only:
199
+ params = _changed_params(object)
200
+ else:
201
+ params = object.get_params(deep=False)
202
+
203
+ params = OrderedDict((name, val) for (name, val) in sorted(params.items()))
204
+
205
+ self._format_params(
206
+ params.items(), stream, indent, allowance + 1, context, level
207
+ )
208
+ stream.write(")")
209
+
210
+ def _format_dict_items(self, items, stream, indent, allowance, context, level):
211
+ return self._format_params_or_dict_items(
212
+ items, stream, indent, allowance, context, level, is_dict=True
213
+ )
214
+
215
+ def _format_params(self, items, stream, indent, allowance, context, level):
216
+ return self._format_params_or_dict_items(
217
+ items, stream, indent, allowance, context, level, is_dict=False
218
+ )
219
+
220
+ def _format_params_or_dict_items(
221
+ self, object, stream, indent, allowance, context, level, is_dict
222
+ ):
223
+ """Format dict items or parameters respecting the compact=True
224
+ parameter. For some reason, the builtin rendering of dict items doesn't
225
+ respect compact=True and will use one line per key-value if all cannot
226
+ fit in a single line.
227
+ Dict items will be rendered as <'key': value> while params will be
228
+ rendered as <key=value>. The implementation is mostly copy/pasting from
229
+ the builtin _format_items().
230
+ This also adds ellipsis if the number of items is greater than
231
+ self.n_max_elements_to_show.
232
+ """
233
+ write = stream.write
234
+ indent += self._indent_per_level
235
+ delimnl = ",\n" + " " * indent
236
+ delim = ""
237
+ width = max_width = self._width - indent + 1
238
+ it = iter(object)
239
+ try:
240
+ next_ent = next(it)
241
+ except StopIteration:
242
+ return
243
+ last = False
244
+ n_items = 0
245
+ while not last:
246
+ if n_items == self.n_max_elements_to_show:
247
+ write(", ...")
248
+ break
249
+ n_items += 1
250
+ ent = next_ent
251
+ try:
252
+ next_ent = next(it)
253
+ except StopIteration:
254
+ last = True
255
+ max_width -= allowance
256
+ width -= allowance
257
+ if self._compact:
258
+ k, v = ent
259
+ krepr = self._repr(k, context, level)
260
+ vrepr = self._repr(v, context, level)
261
+ if not is_dict:
262
+ krepr = krepr.strip("'")
263
+ middle = ": " if is_dict else "="
264
+ rep = krepr + middle + vrepr
265
+ w = len(rep) + 2
266
+ if width < w:
267
+ width = max_width
268
+ if delim:
269
+ delim = delimnl
270
+ if width >= w:
271
+ width -= w
272
+ write(delim)
273
+ delim = ", "
274
+ write(rep)
275
+ continue
276
+ write(delim)
277
+ delim = delimnl
278
+ class_ = KeyValTuple if is_dict else KeyValTupleParam
279
+ self._format(
280
+ class_(ent), stream, indent, allowance if last else 1, context, level
281
+ )
282
+
283
+ def _format_items(self, items, stream, indent, allowance, context, level):
284
+ """Format the items of an iterable (list, tuple...). Same as the
285
+ built-in _format_items, with support for ellipsis if the number of
286
+ elements is greater than self.n_max_elements_to_show.
287
+ """
288
+ write = stream.write
289
+ indent += self._indent_per_level
290
+ if self._indent_per_level > 1:
291
+ write((self._indent_per_level - 1) * " ")
292
+ delimnl = ",\n" + " " * indent
293
+ delim = ""
294
+ width = max_width = self._width - indent + 1
295
+ it = iter(items)
296
+ try:
297
+ next_ent = next(it)
298
+ except StopIteration:
299
+ return
300
+ last = False
301
+ n_items = 0
302
+ while not last:
303
+ if n_items == self.n_max_elements_to_show:
304
+ write(", ...")
305
+ break
306
+ n_items += 1
307
+ ent = next_ent
308
+ try:
309
+ next_ent = next(it)
310
+ except StopIteration:
311
+ last = True
312
+ max_width -= allowance
313
+ width -= allowance
314
+ if self._compact:
315
+ rep = self._repr(ent, context, level)
316
+ w = len(rep) + 2
317
+ if width < w:
318
+ width = max_width
319
+ if delim:
320
+ delim = delimnl
321
+ if width >= w:
322
+ width -= w
323
+ write(delim)
324
+ delim = ", "
325
+ write(rep)
326
+ continue
327
+ write(delim)
328
+ delim = delimnl
329
+ self._format(ent, stream, indent, allowance if last else 1, context, level)
330
+
331
+ def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level):
332
+ """Pretty printing for key-value tuples from dict or parameters."""
333
+ k, v = object
334
+ rep = self._repr(k, context, level)
335
+ if isinstance(object, KeyValTupleParam):
336
+ rep = rep.strip("'")
337
+ middle = "="
338
+ else:
339
+ middle = ": "
340
+ stream.write(rep)
341
+ stream.write(middle)
342
+ self._format(
343
+ v, stream, indent + len(rep) + len(middle), allowance, context, level
344
+ )
345
+
346
+ # Note: need to copy _dispatch to prevent instances of the builtin
347
+ # PrettyPrinter class to call methods of _EstimatorPrettyPrinter (see issue
348
+ # 12906)
349
+ # mypy error: "Type[PrettyPrinter]" has no attribute "_dispatch"
350
+ _dispatch = pprint.PrettyPrinter._dispatch.copy() # type: ignore
351
+ _dispatch[BaseEstimator.__repr__] = _pprint_estimator
352
+ _dispatch[KeyValTuple.__repr__] = _pprint_key_val_tuple
353
+
354
+
355
+ def _safe_repr(object, context, maxlevels, level, changed_only=False):
356
+ """Same as the builtin _safe_repr, with added support for Estimator
357
+ objects."""
358
+ typ = type(object)
359
+
360
+ if typ in pprint._builtin_scalars:
361
+ return repr(object), True, False
362
+
363
+ r = getattr(typ, "__repr__", None)
364
+ if issubclass(typ, dict) and r is dict.__repr__:
365
+ if not object:
366
+ return "{}", True, False
367
+ objid = id(object)
368
+ if maxlevels and level >= maxlevels:
369
+ return "{...}", False, objid in context
370
+ if objid in context:
371
+ return pprint._recursion(object), False, True
372
+ context[objid] = 1
373
+ readable = True
374
+ recursive = False
375
+ components = []
376
+ append = components.append
377
+ level += 1
378
+ saferepr = _safe_repr
379
+ items = sorted(object.items(), key=pprint._safe_tuple)
380
+ for k, v in items:
381
+ krepr, kreadable, krecur = saferepr(
382
+ k, context, maxlevels, level, changed_only=changed_only
383
+ )
384
+ vrepr, vreadable, vrecur = saferepr(
385
+ v, context, maxlevels, level, changed_only=changed_only
386
+ )
387
+ append("%s: %s" % (krepr, vrepr))
388
+ readable = readable and kreadable and vreadable
389
+ if krecur or vrecur:
390
+ recursive = True
391
+ del context[objid]
392
+ return "{%s}" % ", ".join(components), readable, recursive
393
+
394
+ if (issubclass(typ, list) and r is list.__repr__) or (
395
+ issubclass(typ, tuple) and r is tuple.__repr__
396
+ ):
397
+ if issubclass(typ, list):
398
+ if not object:
399
+ return "[]", True, False
400
+ format = "[%s]"
401
+ elif len(object) == 1:
402
+ format = "(%s,)"
403
+ else:
404
+ if not object:
405
+ return "()", True, False
406
+ format = "(%s)"
407
+ objid = id(object)
408
+ if maxlevels and level >= maxlevels:
409
+ return format % "...", False, objid in context
410
+ if objid in context:
411
+ return pprint._recursion(object), False, True
412
+ context[objid] = 1
413
+ readable = True
414
+ recursive = False
415
+ components = []
416
+ append = components.append
417
+ level += 1
418
+ for o in object:
419
+ orepr, oreadable, orecur = _safe_repr(
420
+ o, context, maxlevels, level, changed_only=changed_only
421
+ )
422
+ append(orepr)
423
+ if not oreadable:
424
+ readable = False
425
+ if orecur:
426
+ recursive = True
427
+ del context[objid]
428
+ return format % ", ".join(components), readable, recursive
429
+
430
+ if issubclass(typ, BaseEstimator):
431
+ objid = id(object)
432
+ if maxlevels and level >= maxlevels:
433
+ return "{...}", False, objid in context
434
+ if objid in context:
435
+ return pprint._recursion(object), False, True
436
+ context[objid] = 1
437
+ readable = True
438
+ recursive = False
439
+ if changed_only:
440
+ params = _changed_params(object)
441
+ else:
442
+ params = object.get_params(deep=False)
443
+ components = []
444
+ append = components.append
445
+ level += 1
446
+ saferepr = _safe_repr
447
+ items = sorted(params.items(), key=pprint._safe_tuple)
448
+ for k, v in items:
449
+ krepr, kreadable, krecur = saferepr(
450
+ k, context, maxlevels, level, changed_only=changed_only
451
+ )
452
+ vrepr, vreadable, vrecur = saferepr(
453
+ v, context, maxlevels, level, changed_only=changed_only
454
+ )
455
+ append("%s=%s" % (krepr.strip("'"), vrepr))
456
+ readable = readable and kreadable and vreadable
457
+ if krecur or vrecur:
458
+ recursive = True
459
+ del context[objid]
460
+ return ("%s(%s)" % (typ.__name__, ", ".join(components)), readable, recursive)
461
+
462
+ rep = repr(object)
463
+ return rep, (rep and not rep.startswith("<")), False
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (356 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_response.py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utilities to get the response values of a classifier or a regressor.
2
+
3
+ It allows to make uniform checks and validation.
4
+ """
5
+ import numpy as np
6
+
7
+ from ..base import is_classifier
8
+ from .multiclass import type_of_target
9
+ from .validation import _check_response_method, check_is_fitted
10
+
11
+
12
+ def _process_predict_proba(*, y_pred, target_type, classes, pos_label):
13
+ """Get the response values when the response method is `predict_proba`.
14
+
15
+ This function process the `y_pred` array in the binary and multi-label cases.
16
+ In the binary case, it selects the column corresponding to the positive
17
+ class. In the multi-label case, it stacks the predictions if they are not
18
+ in the "compressed" format `(n_samples, n_outputs)`.
19
+
20
+ Parameters
21
+ ----------
22
+ y_pred : ndarray
23
+ Output of `estimator.predict_proba`. The shape depends on the target type:
24
+
25
+ - for binary classification, it is a 2d array of shape `(n_samples, 2)`;
26
+ - for multiclass classification, it is a 2d array of shape
27
+ `(n_samples, n_classes)`;
28
+ - for multilabel classification, it is either a list of 2d arrays of shape
29
+ `(n_samples, 2)` (e.g. `RandomForestClassifier` or `KNeighborsClassifier`) or
30
+ an array of shape `(n_samples, n_outputs)` (e.g. `MLPClassifier` or
31
+ `RidgeClassifier`).
32
+
33
+ target_type : {"binary", "multiclass", "multilabel-indicator"}
34
+ Type of the target.
35
+
36
+ classes : ndarray of shape (n_classes,) or list of such arrays
37
+ Class labels as reported by `estimator.classes_`.
38
+
39
+ pos_label : int, float, bool or str
40
+ Only used with binary and multiclass targets.
41
+
42
+ Returns
43
+ -------
44
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
45
+ (n_samples, n_output)
46
+ Compressed predictions format as requested by the metrics.
47
+ """
48
+ if target_type == "binary" and y_pred.shape[1] < 2:
49
+ # We don't handle classifiers trained on a single class.
50
+ raise ValueError(
51
+ f"Got predict_proba of shape {y_pred.shape}, but need "
52
+ "classifier with two classes."
53
+ )
54
+
55
+ if target_type == "binary":
56
+ col_idx = np.flatnonzero(classes == pos_label)[0]
57
+ return y_pred[:, col_idx]
58
+ elif target_type == "multilabel-indicator":
59
+ # Use a compress format of shape `(n_samples, n_output)`.
60
+ # Only `MLPClassifier` and `RidgeClassifier` return an array of shape
61
+ # `(n_samples, n_outputs)`.
62
+ if isinstance(y_pred, list):
63
+ # list of arrays of shape `(n_samples, 2)`
64
+ return np.vstack([p[:, -1] for p in y_pred]).T
65
+ else:
66
+ # array of shape `(n_samples, n_outputs)`
67
+ return y_pred
68
+
69
+ return y_pred
70
+
71
+
72
+ def _process_decision_function(*, y_pred, target_type, classes, pos_label):
73
+ """Get the response values when the response method is `decision_function`.
74
+
75
+ This function process the `y_pred` array in the binary and multi-label cases.
76
+ In the binary case, it inverts the sign of the score if the positive label
77
+ is not `classes[1]`. In the multi-label case, it stacks the predictions if
78
+ they are not in the "compressed" format `(n_samples, n_outputs)`.
79
+
80
+ Parameters
81
+ ----------
82
+ y_pred : ndarray
83
+ Output of `estimator.predict_proba`. The shape depends on the target type:
84
+
85
+ - for binary classification, it is a 1d array of shape `(n_samples,)` where the
86
+ sign is assuming that `classes[1]` is the positive class;
87
+ - for multiclass classification, it is a 2d array of shape
88
+ `(n_samples, n_classes)`;
89
+ - for multilabel classification, it is a 2d array of shape `(n_samples,
90
+ n_outputs)`.
91
+
92
+ target_type : {"binary", "multiclass", "multilabel-indicator"}
93
+ Type of the target.
94
+
95
+ classes : ndarray of shape (n_classes,) or list of such arrays
96
+ Class labels as reported by `estimator.classes_`.
97
+
98
+ pos_label : int, float, bool or str
99
+ Only used with binary and multiclass targets.
100
+
101
+ Returns
102
+ -------
103
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
104
+ (n_samples, n_output)
105
+ Compressed predictions format as requested by the metrics.
106
+ """
107
+ if target_type == "binary" and pos_label == classes[0]:
108
+ return -1 * y_pred
109
+ return y_pred
110
+
111
+
112
+ def _get_response_values(
113
+ estimator,
114
+ X,
115
+ response_method,
116
+ pos_label=None,
117
+ return_response_method_used=False,
118
+ ):
119
+ """Compute the response values of a classifier, an outlier detector, or a regressor.
120
+
121
+ The response values are predictions such that it follows the following shape:
122
+
123
+ - for binary classification, it is a 1d array of shape `(n_samples,)`;
124
+ - for multiclass classification, it is a 2d array of shape `(n_samples, n_classes)`;
125
+ - for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
126
+ - for outlier detection, it is a 1d array of shape `(n_samples,)`;
127
+ - for regression, it is a 1d array of shape `(n_samples,)`.
128
+
129
+ If `estimator` is a binary classifier, also return the label for the
130
+ effective positive class.
131
+
132
+ This utility is used primarily in the displays and the scikit-learn scorers.
133
+
134
+ .. versionadded:: 1.3
135
+
136
+ Parameters
137
+ ----------
138
+ estimator : estimator instance
139
+ Fitted classifier, outlier detector, or regressor or a
140
+ fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
141
+ classifier, an outlier detector, or a regressor.
142
+
143
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
144
+ Input values.
145
+
146
+ response_method : {"predict_proba", "predict_log_proba", "decision_function", \
147
+ "predict"} or list of such str
148
+ Specifies the response method to use get prediction from an estimator
149
+ (i.e. :term:`predict_proba`, :term:`predict_log_proba`,
150
+ :term:`decision_function` or :term:`predict`). Possible choices are:
151
+
152
+ - if `str`, it corresponds to the name to the method to return;
153
+ - if a list of `str`, it provides the method names in order of
154
+ preference. The method returned corresponds to the first method in
155
+ the list and which is implemented by `estimator`.
156
+
157
+ pos_label : int, float, bool or str, default=None
158
+ The class considered as the positive class when computing
159
+ the metrics. If `None` and target is 'binary', `estimators.classes_[1]` is
160
+ considered as the positive class.
161
+
162
+ return_response_method_used : bool, default=False
163
+ Whether to return the response method used to compute the response
164
+ values.
165
+
166
+ .. versionadded:: 1.4
167
+
168
+ Returns
169
+ -------
170
+ y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
171
+ (n_samples, n_outputs)
172
+ Target scores calculated from the provided `response_method`
173
+ and `pos_label`.
174
+
175
+ pos_label : int, float, bool, str or None
176
+ The class considered as the positive class when computing
177
+ the metrics. Returns `None` if `estimator` is a regressor or an outlier
178
+ detector.
179
+
180
+ response_method_used : str
181
+ The response method used to compute the response values. Only returned
182
+ if `return_response_method_used` is `True`.
183
+
184
+ .. versionadded:: 1.4
185
+
186
+ Raises
187
+ ------
188
+ ValueError
189
+ If `pos_label` is not a valid label.
190
+ If the shape of `y_pred` is not consistent for binary classifier.
191
+ If the response method can be applied to a classifier only and
192
+ `estimator` is a regressor.
193
+ """
194
+ from sklearn.base import is_classifier, is_outlier_detector # noqa
195
+
196
+ if is_classifier(estimator):
197
+ prediction_method = _check_response_method(estimator, response_method)
198
+ classes = estimator.classes_
199
+ target_type = type_of_target(classes)
200
+
201
+ if target_type in ("binary", "multiclass"):
202
+ if pos_label is not None and pos_label not in classes.tolist():
203
+ raise ValueError(
204
+ f"pos_label={pos_label} is not a valid label: It should be "
205
+ f"one of {classes}"
206
+ )
207
+ elif pos_label is None and target_type == "binary":
208
+ pos_label = classes[-1]
209
+
210
+ y_pred = prediction_method(X)
211
+
212
+ if prediction_method.__name__ in ("predict_proba", "predict_log_proba"):
213
+ y_pred = _process_predict_proba(
214
+ y_pred=y_pred,
215
+ target_type=target_type,
216
+ classes=classes,
217
+ pos_label=pos_label,
218
+ )
219
+ elif prediction_method.__name__ == "decision_function":
220
+ y_pred = _process_decision_function(
221
+ y_pred=y_pred,
222
+ target_type=target_type,
223
+ classes=classes,
224
+ pos_label=pos_label,
225
+ )
226
+ elif is_outlier_detector(estimator):
227
+ prediction_method = _check_response_method(estimator, response_method)
228
+ y_pred, pos_label = prediction_method(X), None
229
+ else: # estimator is a regressor
230
+ if response_method != "predict":
231
+ raise ValueError(
232
+ f"{estimator.__class__.__name__} should either be a classifier to be "
233
+ f"used with response_method={response_method} or the response_method "
234
+ "should be 'predict'. Got a regressor with response_method="
235
+ f"{response_method} instead."
236
+ )
237
+ prediction_method = estimator.predict
238
+ y_pred, pos_label = prediction_method(X), None
239
+
240
+ if return_response_method_used:
241
+ return y_pred, pos_label, prediction_method.__name__
242
+ return y_pred, pos_label
243
+
244
+
245
+ def _get_response_values_binary(estimator, X, response_method, pos_label=None):
246
+ """Compute the response values of a binary classifier.
247
+
248
+ Parameters
249
+ ----------
250
+ estimator : estimator instance
251
+ Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
252
+ in which the last estimator is a binary classifier.
253
+
254
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
255
+ Input values.
256
+
257
+ response_method : {'auto', 'predict_proba', 'decision_function'}
258
+ Specifies whether to use :term:`predict_proba` or
259
+ :term:`decision_function` as the target response. If set to 'auto',
260
+ :term:`predict_proba` is tried first and if it does not exist
261
+ :term:`decision_function` is tried next.
262
+
263
+ pos_label : int, float, bool or str, default=None
264
+ The class considered as the positive class when computing
265
+ the metrics. By default, `estimators.classes_[1]` is
266
+ considered as the positive class.
267
+
268
+ Returns
269
+ -------
270
+ y_pred : ndarray of shape (n_samples,)
271
+ Target scores calculated from the provided response_method
272
+ and pos_label.
273
+
274
+ pos_label : int, float, bool or str
275
+ The class considered as the positive class when computing
276
+ the metrics.
277
+ """
278
+ classification_error = "Expected 'estimator' to be a binary classifier."
279
+
280
+ check_is_fitted(estimator)
281
+ if not is_classifier(estimator):
282
+ raise ValueError(
283
+ classification_error + f" Got {estimator.__class__.__name__} instead."
284
+ )
285
+ elif len(estimator.classes_) != 2:
286
+ raise ValueError(
287
+ classification_error + f" Got {len(estimator.classes_)} classes instead."
288
+ )
289
+
290
+ if response_method == "auto":
291
+ response_method = ["predict_proba", "decision_function"]
292
+
293
+ return _get_response_values(
294
+ estimator,
295
+ X,
296
+ response_method,
297
+ pos_label=pos_label,
298
+ )
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_show_versions.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility methods to print system info for debugging
3
+
4
+ adapted from :func:`pandas.show_versions`
5
+ """
6
+ # License: BSD 3 clause
7
+
8
+ import platform
9
+ import sys
10
+
11
+ from .. import __version__
12
+ from ..utils.fixes import threadpool_info
13
+ from ._openmp_helpers import _openmp_parallelism_enabled
14
+
15
+
16
+ def _get_sys_info():
17
+ """System information
18
+
19
+ Returns
20
+ -------
21
+ sys_info : dict
22
+ system and Python version information
23
+
24
+ """
25
+ python = sys.version.replace("\n", " ")
26
+
27
+ blob = [
28
+ ("python", python),
29
+ ("executable", sys.executable),
30
+ ("machine", platform.platform()),
31
+ ]
32
+
33
+ return dict(blob)
34
+
35
+
36
+ def _get_deps_info():
37
+ """Overview of the installed version of main dependencies
38
+
39
+ This function does not import the modules to collect the version numbers
40
+ but instead relies on standard Python package metadata.
41
+
42
+ Returns
43
+ -------
44
+ deps_info: dict
45
+ version information on relevant Python libraries
46
+
47
+ """
48
+ deps = [
49
+ "pip",
50
+ "setuptools",
51
+ "numpy",
52
+ "scipy",
53
+ "Cython",
54
+ "pandas",
55
+ "matplotlib",
56
+ "joblib",
57
+ "threadpoolctl",
58
+ ]
59
+
60
+ deps_info = {
61
+ "sklearn": __version__,
62
+ }
63
+
64
+ from importlib.metadata import PackageNotFoundError, version
65
+
66
+ for modname in deps:
67
+ try:
68
+ deps_info[modname] = version(modname)
69
+ except PackageNotFoundError:
70
+ deps_info[modname] = None
71
+ return deps_info
72
+
73
+
74
+ def show_versions():
75
+ """Print useful debugging information"
76
+
77
+ .. versionadded:: 0.20
78
+
79
+ Examples
80
+ --------
81
+ >>> from sklearn import show_versions
82
+ >>> show_versions() # doctest: +SKIP
83
+ """
84
+
85
+ sys_info = _get_sys_info()
86
+ deps_info = _get_deps_info()
87
+
88
+ print("\nSystem:")
89
+ for k, stat in sys_info.items():
90
+ print("{k:>10}: {stat}".format(k=k, stat=stat))
91
+
92
+ print("\nPython dependencies:")
93
+ for k, stat in deps_info.items():
94
+ print("{k:>13}: {stat}".format(k=k, stat=stat))
95
+
96
+ print(
97
+ "\n{k}: {stat}".format(
98
+ k="Built with OpenMP", stat=_openmp_parallelism_enabled()
99
+ )
100
+ )
101
+
102
+ # show threadpoolctl results
103
+ threadpool_results = threadpool_info()
104
+ if threadpool_results:
105
+ print()
106
+ print("threadpoolctl info:")
107
+
108
+ for i, result in enumerate(threadpool_results):
109
+ for key, val in result.items():
110
+ print(f"{key:>15}: {val}")
111
+ if i != len(threadpool_results) - 1:
112
+ print()
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_testing.py ADDED
@@ -0,0 +1,1169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Testing utilities."""
2
+
3
+ # Copyright (c) 2011, 2012
4
+ # Authors: Pietro Berkes,
5
+ # Andreas Muller
6
+ # Mathieu Blondel
7
+ # Olivier Grisel
8
+ # Arnaud Joly
9
+ # Denis Engemann
10
+ # Giorgio Patrini
11
+ # Thierry Guillemot
12
+ # License: BSD 3 clause
13
+ import atexit
14
+ import contextlib
15
+ import functools
16
+ import importlib
17
+ import inspect
18
+ import os
19
+ import os.path as op
20
+ import re
21
+ import shutil
22
+ import sys
23
+ import tempfile
24
+ import unittest
25
+ import warnings
26
+ from collections.abc import Iterable
27
+ from dataclasses import dataclass
28
+ from functools import wraps
29
+ from inspect import signature
30
+ from subprocess import STDOUT, CalledProcessError, TimeoutExpired, check_output
31
+ from unittest import TestCase
32
+
33
+ import joblib
34
+ import numpy as np
35
+ import scipy as sp
36
+ from numpy.testing import assert_allclose as np_assert_allclose
37
+ from numpy.testing import (
38
+ assert_almost_equal,
39
+ assert_approx_equal,
40
+ assert_array_almost_equal,
41
+ assert_array_equal,
42
+ assert_array_less,
43
+ assert_no_warnings,
44
+ )
45
+
46
+ import sklearn
47
+ from sklearn.utils import (
48
+ _IS_32BIT,
49
+ IS_PYPY,
50
+ _in_unstable_openblas_configuration,
51
+ )
52
+ from sklearn.utils._array_api import _check_array_api_dispatch
53
+ from sklearn.utils.fixes import VisibleDeprecationWarning, parse_version, sp_version
54
+ from sklearn.utils.multiclass import check_classification_targets
55
+ from sklearn.utils.validation import (
56
+ check_array,
57
+ check_is_fitted,
58
+ check_X_y,
59
+ )
60
+
61
+ __all__ = [
62
+ "assert_raises",
63
+ "assert_raises_regexp",
64
+ "assert_array_equal",
65
+ "assert_almost_equal",
66
+ "assert_array_almost_equal",
67
+ "assert_array_less",
68
+ "assert_approx_equal",
69
+ "assert_allclose",
70
+ "assert_run_python_script_without_output",
71
+ "assert_no_warnings",
72
+ "SkipTest",
73
+ ]
74
+
75
+ _dummy = TestCase("__init__")
76
+ assert_raises = _dummy.assertRaises
77
+ SkipTest = unittest.case.SkipTest
78
+ assert_dict_equal = _dummy.assertDictEqual
79
+
80
+ assert_raises_regex = _dummy.assertRaisesRegex
81
+ # assert_raises_regexp is deprecated in Python 3.4 in favor of
82
+ # assert_raises_regex but lets keep the backward compat in scikit-learn with
83
+ # the old name for now
84
+ assert_raises_regexp = assert_raises_regex
85
+
86
+
87
def ignore_warnings(obj=None, category=Warning):
    """Context manager and decorator to ignore warnings.

    Note: Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging, this is not your tool of choice.

    Parameters
    ----------
    obj : callable, default=None
        callable where you want to ignore the warnings.
    category : warning class, default=Warning
        The category to filter. If Warning, all categories will be muted.

    Examples
    --------
    >>> import warnings
    >>> from sklearn.utils._testing import ignore_warnings
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')

    >>> def nasty_warn():
    ...     warnings.warn('buhuhuhu')
    ...     print(42)

    >>> ignore_warnings(nasty_warn)()
    42
    """
    # Guard against the common mistake of passing the warning class as the
    # first positional argument (warning classes are callable, so this must
    # be checked before the callable branch); doing so would silently skip
    # running the decorated test.
    if isinstance(obj, type) and issubclass(obj, Warning):
        warning_name = obj.__name__
        raise ValueError(
            "'obj' should be a callable where you want to ignore warnings. "
            "You passed a warning class instead: 'obj={warning_name}'. "
            "If you want to pass a warning class to ignore_warnings, "
            "you should use 'category={warning_name}'".format(warning_name=warning_name)
        )
    if callable(obj):
        # Decorator usage: wrap the callable immediately.
        return _IgnoreWarnings(category=category)(obj)
    # Context-manager usage: hand back the (un-entered) manager.
    return _IgnoreWarnings(category=category)
129
+
130
+
131
+ class _IgnoreWarnings:
132
+ """Improved and simplified Python warnings context manager and decorator.
133
+
134
+ This class allows the user to ignore the warnings raised by a function.
135
+ Copied from Python 2.7.5 and modified as required.
136
+
137
+ Parameters
138
+ ----------
139
+ category : tuple of warning class, default=Warning
140
+ The category to filter. By default, all the categories will be muted.
141
+
142
+ """
143
+
144
+ def __init__(self, category):
145
+ self._record = True
146
+ self._module = sys.modules["warnings"]
147
+ self._entered = False
148
+ self.log = []
149
+ self.category = category
150
+
151
+ def __call__(self, fn):
152
+ """Decorator to catch and hide warnings without visual nesting."""
153
+
154
+ @wraps(fn)
155
+ def wrapper(*args, **kwargs):
156
+ with warnings.catch_warnings():
157
+ warnings.simplefilter("ignore", self.category)
158
+ return fn(*args, **kwargs)
159
+
160
+ return wrapper
161
+
162
+ def __repr__(self):
163
+ args = []
164
+ if self._record:
165
+ args.append("record=True")
166
+ if self._module is not sys.modules["warnings"]:
167
+ args.append("module=%r" % self._module)
168
+ name = type(self).__name__
169
+ return "%s(%s)" % (name, ", ".join(args))
170
+
171
+ def __enter__(self):
172
+ if self._entered:
173
+ raise RuntimeError("Cannot enter %r twice" % self)
174
+ self._entered = True
175
+ self._filters = self._module.filters
176
+ self._module.filters = self._filters[:]
177
+ self._showwarning = self._module.showwarning
178
+ warnings.simplefilter("ignore", self.category)
179
+
180
+ def __exit__(self, *exc_info):
181
+ if not self._entered:
182
+ raise RuntimeError("Cannot exit %r without entering first" % self)
183
+ self._module.filters = self._filters
184
+ self._module.showwarning = self._showwarning
185
+ self.log[:] = []
186
+
187
+
188
def assert_raise_message(exceptions, message, function, *args, **kwargs):
    """Assert that calling ``function`` raises an exception with a message.

    The call must raise one of ``exceptions`` and ``message`` must be a
    substring of the raised error's text; otherwise an AssertionError is
    raised.

    Parameters
    ----------
    exceptions : exception or tuple of exception
        An Exception object.

    message : str
        The error message or a substring of the error message.

    function : callable
        Callable object to raise error.

    *args : the positional arguments to `function`.

    **kwargs : the keyword arguments to `function`.
    """
    try:
        function(*args, **kwargs)
    except exceptions as exc:
        observed = str(exc)
        if message not in observed:
            raise AssertionError(
                "Error message does not include the expected"
                " string: %r. Observed error message: %r" % (message, observed)
            )
    else:
        # Nothing was raised: build a readable name for the expectation.
        names = (
            " or ".join(exc_type.__name__ for exc_type in exceptions)
            if isinstance(exceptions, tuple)
            else exceptions.__name__
        )
        raise AssertionError("%s not raised by %s" % (names, function.__name__))
228
+
229
+
230
def assert_allclose(
    actual, desired, rtol=None, atol=0.0, equal_nan=True, err_msg="", verbose=True
):
    """dtype-aware variant of numpy.testing.assert_allclose

    This variant introspects the least precise floating point dtype
    in the input argument and automatically sets the relative tolerance
    parameter to 1e-4 float32 and use 1e-7 otherwise (typically float64
    in scikit-learn).

    `atol` is always left to 0. by default. It should be adjusted manually
    to an assertion-specific value in case there are null values expected
    in `desired`.

    The aggregate tolerance is `atol + rtol * abs(desired)`.

    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional, default=None
        Relative tolerance.
        If None, it is set based on the provided arrays' dtypes.
    atol : float, optional, default=0.
        Absolute tolerance.
    equal_nan : bool, optional, default=True
        If True, NaNs will compare equal.
    err_msg : str, optional, default=''
        The error message to be printed in case of failure.
    verbose : bool, optional, default=True
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    numpy.testing.assert_allclose

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils._testing import assert_allclose
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> assert_allclose(x, y, rtol=1e-5, atol=0)
    >>> a = np.full(shape=10, fill_value=1e-5, dtype=np.float32)
    >>> assert_allclose(a, 1e-5)
    """
    # Removed a dead ``dtypes = []`` initialization that was immediately
    # overwritten by the assignment below.
    actual, desired = np.asanyarray(actual), np.asanyarray(desired)
    dtypes = [actual.dtype, desired.dtype]

    if rtol is None:
        # Use the loosest tolerance implied by the operand dtypes so that a
        # float32 operand is not held to float64 precision.
        rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
        rtol = max(rtols)

    np_assert_allclose(
        actual,
        desired,
        rtol=rtol,
        atol=atol,
        equal_nan=equal_nan,
        err_msg=err_msg,
        verbose=verbose,
    )
301
+
302
+
303
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=""):
    """Assert allclose for sparse and dense data.

    Both x and y need to be either sparse or dense, they
    can't be mixed.

    Parameters
    ----------
    x : {array-like, sparse matrix}
        First array to compare.

    y : {array-like, sparse matrix}
        Second array to compare.

    rtol : float, default=1e-07
        relative tolerance; see numpy.allclose.

    atol : float, default=1e-9
        absolute tolerance; see numpy.allclose. Note that the default here is
        more tolerant than the default for numpy.testing.assert_allclose, where
        atol=0.

    err_msg : str, default=''
        Error message to raise.
    """
    x_is_sparse = sp.sparse.issparse(x)
    y_is_sparse = sp.sparse.issparse(y)

    # Mixing a sparse matrix with a dense array is not supported.
    if x_is_sparse != y_is_sparse:
        raise ValueError(
            "Can only compare two sparse matrices, not a sparse matrix and an array."
        )

    if x_is_sparse:
        # Normalize both operands to canonical CSR before comparing structure.
        x = x.tocsr()
        y = y.tocsr()
        x.sum_duplicates()
        y.sum_duplicates()
        assert_array_equal(x.indices, y.indices, err_msg=err_msg)
        assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
        assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
    else:
        # Both dense: delegate directly.
        assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
343
+
344
+
345
def set_random_state(estimator, random_state=0):
    """Set random state of an estimator if it has the `random_state` param.

    Parameters
    ----------
    estimator : object
        The estimator.
    random_state : int, RandomState instance or None, default=0
        Pseudo random number generator state.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    """
    # Only touch estimators that actually expose the parameter.
    params = estimator.get_params()
    if "random_state" in params:
        estimator.set_params(random_state=random_state)
359
+
360
+
361
# Probe once at import time whether array API dispatch can be enabled
# (this requires array_api_compat and a recent-enough NumPy). Tests use the
# resulting flag to skip themselves when the feature is unavailable.
try:
    _check_array_api_dispatch(True)
    ARRAY_API_COMPAT_FUNCTIONAL = True
except ImportError:
    ARRAY_API_COMPAT_FUNCTIONAL = False
366
+
367
# Define the pytest markers used throughout the test suite. They are created
# lazily inside a try/except so that this module stays importable when pytest
# itself is not installed (e.g. in a bare runtime install).
try:
    import pytest

    skip_if_32bit = pytest.mark.skipif(_IS_32BIT, reason="skipped on 32bit platforms")
    fails_if_pypy = pytest.mark.xfail(IS_PYPY, reason="not compatible with PyPy")
    fails_if_unstable_openblas = pytest.mark.xfail(
        _in_unstable_openblas_configuration(),
        reason="OpenBLAS is unstable for this configuration",
    )
    skip_if_no_parallel = pytest.mark.skipif(
        not joblib.parallel.mp, reason="joblib is in serial mode"
    )
    skip_if_array_api_compat_not_configured = pytest.mark.skipif(
        not ARRAY_API_COMPAT_FUNCTIONAL,
        reason="requires array_api_compat installed and a new enough version of NumPy",
    )

    # Decorator for tests involving both BLAS calls and multiprocessing.
    #
    # Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
    # with some implementation of BLAS (or other libraries that manage an
    # internal posix thread pool) can cause a crash or a freeze of the Python
    # process.
    #
    # In practice all known packaged distributions (from Linux distros or
    # Anaconda) of BLAS under Linux seems to be safe. So we this problem seems
    # to only impact OSX users.
    #
    # This wrapper makes it possible to skip tests that can possibly cause
    # this crash under OS X with.
    #
    # Under Python 3.4+ it is possible to use the `forkserver` start method
    # for multiprocessing to avoid this issue. However it can cause pickling
    # errors on interactively defined functions. It therefore not enabled by
    # default.

    if_safe_multiprocessing_with_blas = pytest.mark.skipif(
        sys.platform == "darwin", reason="Possible multi-process bug with some BLAS"
    )
except ImportError:
    pass
408
+
409
+
410
def check_skip_network():
    """Skip the calling test when network-dependent tests are disabled.

    Raises SkipTest when the SKLEARN_SKIP_NETWORK_TESTS environment variable
    is set to a non-zero integer.
    """
    flag = os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
413
+
414
+
415
+ def _delete_folder(folder_path, warn=False):
416
+ """Utility function to cleanup a temporary folder if still existing.
417
+
418
+ Copy from joblib.pool (for independence).
419
+ """
420
+ try:
421
+ if os.path.exists(folder_path):
422
+ # This can fail under windows,
423
+ # but will succeed when called by atexit
424
+ shutil.rmtree(folder_path)
425
+ except OSError:
426
+ if warn:
427
+ warnings.warn("Could not delete temporary folder %s" % folder_path)
428
+
429
+
430
class TempMemmap:
    """Context manager yielding ``data`` backed by a temporary memory map.

    The backing folder is removed on exit.

    Parameters
    ----------
    data
    mmap_mode : str, default='r'
    """

    def __init__(self, data, mmap_mode="r"):
        self.mmap_mode = mmap_mode
        self.data = data

    def __enter__(self):
        # Delegate the dump/load round-trip; keep the folder so __exit__ can
        # clean it up.
        mapped, self.temp_folder = create_memmap_backed_data(
            self.data, mmap_mode=self.mmap_mode, return_folder=True
        )
        return mapped

    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)
450
+
451
+
452
def create_memmap_backed_data(data, mmap_mode="r", return_folder=False):
    """Return ``data`` reloaded through a joblib memory map.

    Parameters
    ----------
    data
    mmap_mode : str, default='r'
    return_folder : bool, default=False
    """
    temp_folder = tempfile.mkdtemp(prefix="sklearn_testing_")
    # Make sure the folder is eventually removed even if the caller forgets.
    atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
    filename = op.join(temp_folder, "data.pkl")
    # Round-trip through disk so the reloaded object is memmap-backed.
    joblib.dump(data, filename)
    memmap_backed_data = joblib.load(filename, mmap_mode=mmap_mode)
    if return_folder:
        return memmap_backed_data, temp_folder
    return memmap_backed_data
469
+
470
+
471
+ # Utils to test docstrings
472
+
473
+
474
+ def _get_args(function, varargs=False):
475
+ """Helper to get function arguments."""
476
+
477
+ try:
478
+ params = signature(function).parameters
479
+ except ValueError:
480
+ # Error on builtin C function
481
+ return []
482
+ args = [
483
+ key
484
+ for key, param in params.items()
485
+ if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
486
+ ]
487
+ if varargs:
488
+ varargs = [
489
+ param.name
490
+ for param in params.values()
491
+ if param.kind == param.VAR_POSITIONAL
492
+ ]
493
+ if len(varargs) == 0:
494
+ varargs = None
495
+ return args, varargs
496
+ else:
497
+ return args
498
+
499
+
500
+ def _get_func_name(func):
501
+ """Get function full name.
502
+
503
+ Parameters
504
+ ----------
505
+ func : callable
506
+ The function object.
507
+
508
+ Returns
509
+ -------
510
+ name : str
511
+ The function name.
512
+ """
513
+ parts = []
514
+ module = inspect.getmodule(func)
515
+ if module:
516
+ parts.append(module.__name__)
517
+
518
+ qualname = func.__qualname__
519
+ if qualname != func.__name__:
520
+ parts.append(qualname[: qualname.find(".")])
521
+
522
+ parts.append(func.__name__)
523
+ return ".".join(parts)
524
+
525
+
526
def check_docstring_parameters(func, doc=None, ignore=None):
    """Helper to check docstring.

    Compares the parameter names found in ``func``'s signature against the
    ones documented in its numpydoc-formatted docstring and reports any
    mismatch, missing or extra entry.

    Parameters
    ----------
    func : callable
        The function object to test.
    doc : str, default=None
        Docstring if it is passed manually to the test.
    ignore : list, default=None
        Parameters to ignore.

    Returns
    -------
    incorrect : list
        A list of string describing the incorrect results.
    """
    # numpydoc is a third-party (doc-build) dependency; imported lazily so
    # the module works without it when this helper is unused.
    from numpydoc import docscrape

    incorrect = []
    ignore = [] if ignore is None else ignore

    func_name = _get_func_name(func)
    # Only check scikit-learn's own public code (not vendored externals).
    if not func_name.startswith("sklearn.") or func_name.startswith(
        "sklearn.externals"
    ):
        return incorrect
    # Don't check docstring for property-functions
    if inspect.isdatadescriptor(func):
        return incorrect
    # Don't check docstring for setup / teardown pytest functions
    if func_name.split(".")[-1] in ("setup_module", "teardown_module"):
        return incorrect
    # Dont check estimator_checks module
    if func_name.split(".")[2] == "estimator_checks":
        return incorrect
    # Get the arguments from the function signature
    param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
    # drop self
    if len(param_signature) > 0 and param_signature[0] == "self":
        param_signature.remove("self")

    # Analyze function's docstring
    if doc is None:
        records = []
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error", UserWarning)
            try:
                doc = docscrape.FunctionDoc(func)
            except UserWarning as exp:
                if "potentially wrong underline length" in str(exp):
                    # Catch warning raised as of numpydoc 1.2 when
                    # the underline length for a section of a docstring
                    # is not consistent.
                    message = str(exp).split("\n")[:3]
                    incorrect += [f"In function: {func_name}"] + message
                    return incorrect
                records.append(str(exp))
            except Exception as exp:
                incorrect += [func_name + " parsing error: " + str(exp)]
                return incorrect
        if len(records):
            raise RuntimeError("Error for %s:\n%s" % (func_name, records[0]))

    param_docs = []
    for name, type_definition, param_doc in doc["Parameters"]:
        # Type hints are empty only if parameter name ended with :
        if not type_definition.strip():
            if ":" in name and name[: name.index(":")][-1:].strip():
                incorrect += [
                    func_name
                    + " There was no space between the param name and colon (%r)" % name
                ]
            elif name.rstrip().endswith(":"):
                incorrect += [
                    func_name
                    + " Parameter %r has an empty type spec. Remove the colon"
                    % (name.lstrip())
                ]

        # Create a list of parameters to compare with the parameters gotten
        # from the func signature
        if "*" not in name:
            param_docs.append(name.split(":")[0].strip("` "))

    # If one of the docstring's parameters had an error then return that
    # incorrect message
    if len(incorrect) > 0:
        return incorrect

    # Remove the parameters that should be ignored from list
    param_docs = list(filter(lambda x: x not in ignore, param_docs))

    # The following is derived from pytest, Copyright (c) 2004-2017 Holger
    # Krekel and others, Licensed under MIT License. See
    # https://github.com/pytest-dev/pytest

    message = []
    # Report only the first positional mismatch between docstring and
    # signature, then check for length differences.
    for i in range(min(len(param_docs), len(param_signature))):
        if param_signature[i] != param_docs[i]:
            message += [
                "There's a parameter name mismatch in function"
                " docstring w.r.t. function signature, at index %s"
                " diff: %r != %r" % (i, param_signature[i], param_docs[i])
            ]
            break
    if len(param_signature) > len(param_docs):
        message += [
            "Parameters in function docstring have less items w.r.t."
            " function signature, first missing item: %s"
            % param_signature[len(param_docs)]
        ]

    elif len(param_signature) < len(param_docs):
        message += [
            "Parameters in function docstring have more items w.r.t."
            " function signature, first extra item: %s"
            % param_docs[len(param_signature)]
        ]

    # If there wasn't any difference in the parameters themselves between
    # docstring and signature including having the same length then return
    # empty list
    if len(message) == 0:
        return []

    import difflib
    import pprint

    param_docs_formatted = pprint.pformat(param_docs).splitlines()
    param_signature_formatted = pprint.pformat(param_signature).splitlines()

    message += ["Full diff:"]

    message.extend(
        line.strip()
        for line in difflib.ndiff(param_signature_formatted, param_docs_formatted)
    )

    incorrect.extend(message)

    # Prepend function name
    incorrect = ["In function: " + func_name] + incorrect

    return incorrect
671
+
672
+
673
def assert_run_python_script_without_output(source_code, pattern=".+", timeout=60):
    """Utility to check assertions in an independent Python subprocess.

    The script provided in the source code should return 0 and the stdtout +
    stderr should not match the pattern `pattern`.

    This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle

    Parameters
    ----------
    source_code : str
        The Python source code to execute.
    pattern : str
        Pattern that the stdout + stderr should not match. By default, unless
        stdout + stderr are both empty, an error will be raised.
    timeout : int, default=60
        Time in seconds before timeout.
    """
    # Write the script to a real file so a fresh interpreter can run it.
    fd, source_file = tempfile.mkstemp(suffix="_src_test_sklearn.py")
    os.close(fd)
    try:
        with open(source_file, "wb") as f:
            f.write(source_code.encode("utf-8"))
        cmd = [sys.executable, source_file]
        # Run from the repository root so the in-tree sklearn is importable.
        cwd = op.normpath(op.join(op.dirname(sklearn.__file__), ".."))
        env = os.environ.copy()
        try:
            env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
        except KeyError:
            env["PYTHONPATH"] = cwd
        # stderr is merged into stdout so a single pattern check covers both.
        kwargs = {"cwd": cwd, "stderr": STDOUT, "env": env}
        # If coverage is running, pass the config file to the subprocess
        coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
        if coverage_rc:
            kwargs["env"]["COVERAGE_PROCESS_START"] = coverage_rc

        kwargs["timeout"] = timeout
        try:
            try:
                out = check_output(cmd, **kwargs)
            except CalledProcessError as e:
                raise RuntimeError(
                    "script errored with output:\n%s" % e.output.decode("utf-8")
                )

            out = out.decode("utf-8")
            if re.search(pattern, out):
                if pattern == ".+":
                    expectation = "Expected no output"
                else:
                    expectation = f"The output was not supposed to match {pattern!r}"

                message = f"{expectation}, got the following output instead: {out!r}"
                raise AssertionError(message)
        except TimeoutExpired as e:
            raise RuntimeError(
                "script timeout, output so far:\n%s" % e.output.decode("utf-8")
            )
    finally:
        # Always remove the temporary script file.
        os.unlink(source_file)
733
+
734
+
735
def _convert_container(
    container,
    constructor_name,
    columns_name=None,
    dtype=None,
    minversion=None,
    categorical_feature_names=None,
):
    """Convert a given container to a specific array-like with a dtype.

    Parameters
    ----------
    container : array-like
        The container to convert.
    constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
            "series", "index", "slice", "sparse_csr", "sparse_csc"}
        The type of the returned container.
    columns_name : index or array-like, default=None
        For pandas container supporting `columns_names`, it will affect
        specific names.
    dtype : dtype, default=None
        Force the dtype of the container. Does not apply to `"slice"`
        container.
    minversion : str, default=None
        Minimum version for package to install.
    categorical_feature_names : list of str, default=None
        List of column names to cast to categorical dtype.

    Returns
    -------
    converted_container
    """
    # Dataframe-library branches use pytest.importorskip so the calling test
    # is skipped (not failed) when the optional dependency is missing.
    if constructor_name == "list":
        if dtype is None:
            return list(container)
        else:
            # Round-trip through numpy to honor the requested dtype.
            return np.asarray(container, dtype=dtype).tolist()
    elif constructor_name == "tuple":
        if dtype is None:
            return tuple(container)
        else:
            return tuple(np.asarray(container, dtype=dtype).tolist())
    elif constructor_name == "array":
        return np.asarray(container, dtype=dtype)
    elif constructor_name in ("pandas", "dataframe"):
        pd = pytest.importorskip("pandas", minversion=minversion)
        result = pd.DataFrame(container, columns=columns_name, dtype=dtype, copy=False)
        if categorical_feature_names is not None:
            for col_name in categorical_feature_names:
                result[col_name] = result[col_name].astype("category")
        return result
    elif constructor_name == "pyarrow":
        pa = pytest.importorskip("pyarrow", minversion=minversion)
        array = np.asarray(container)
        if columns_name is None:
            columns_name = [f"col{i}" for i in range(array.shape[1])]
        data = {name: array[:, i] for i, name in enumerate(columns_name)}
        result = pa.Table.from_pydict(data)
        if categorical_feature_names is not None:
            # pyarrow's analogue of a categorical column is dictionary
            # encoding; replace each requested column in place.
            for col_idx, col_name in enumerate(result.column_names):
                if col_name in categorical_feature_names:
                    result = result.set_column(
                        col_idx, col_name, result.column(col_name).dictionary_encode()
                    )
        return result
    elif constructor_name == "polars":
        pl = pytest.importorskip("polars", minversion=minversion)
        result = pl.DataFrame(container, schema=columns_name, orient="row")
        if categorical_feature_names is not None:
            for col_name in categorical_feature_names:
                result = result.with_columns(pl.col(col_name).cast(pl.Categorical))
        return result
    elif constructor_name == "series":
        pd = pytest.importorskip("pandas", minversion=minversion)
        return pd.Series(container, dtype=dtype)
    elif constructor_name == "index":
        pd = pytest.importorskip("pandas", minversion=minversion)
        return pd.Index(container, dtype=dtype)
    elif constructor_name == "slice":
        return slice(container[0], container[1])
    elif "sparse" in constructor_name:
        if not sp.sparse.issparse(container):
            # For scipy >= 1.13, sparse array constructed from 1d array may be
            # 1d or raise an exception. To avoid this, we make sure that the
            # input container is 2d. For more details, see
            # https://github.com/scipy/scipy/pull/18530#issuecomment-1878005149
            container = np.atleast_2d(container)

        if "array" in constructor_name and sp_version < parse_version("1.8"):
            raise ValueError(
                f"{constructor_name} is only available with scipy>=1.8.0, got "
                f"{sp_version}"
            )
        if constructor_name in ("sparse", "sparse_csr"):
            # sparse and sparse_csr are equivalent for legacy reasons
            return sp.sparse.csr_matrix(container, dtype=dtype)
        elif constructor_name == "sparse_csr_array":
            return sp.sparse.csr_array(container, dtype=dtype)
        elif constructor_name == "sparse_csc":
            return sp.sparse.csc_matrix(container, dtype=dtype)
        elif constructor_name == "sparse_csc_array":
            return sp.sparse.csc_array(container, dtype=dtype)
837
+
838
+
839
def raises(expected_exc_type, match=None, may_pass=False, err_msg=None):
    """Context manager to ensure exceptions are raised within a code block.

    This is similar to and inspired from pytest.raises, but supports a few
    other cases.

    This is only intended to be used in estimator_checks.py where we don't
    want to use pytest. In the rest of the code base, just use pytest.raises
    instead.

    Parameters
    ----------
    expected_exc_type : Exception or list of Exception
        The exception that should be raised by the block. If a list, the block
        should raise one of the exceptions.
    match : str or list of str, default=None
        A regex that the exception message should match. If a list, one of
        the entries must match. If None, match isn't enforced.
    may_pass : bool, default=False
        If True, the block is allowed to not raise an exception. Useful in
        cases where some estimators may support a feature but others must
        fail with an appropriate error message. By default, the context
        manager will raise an exception if the block does not raise an
        exception.
    err_msg : str, default=None
        If the context manager fails (e.g. the block fails to raise the
        proper exception, or fails to match), then an AssertionError is
        raised with this message. By default, an AssertionError is raised
        with a default error message (depends on the kind of failure). Use
        this to indicate how users should fix their estimators to pass the
        checks.

    Attributes
    ----------
    raised_and_matched : bool
        True if an exception was raised and a match was found, False otherwise.
    """
    # Thin factory over the _Raises context manager defined below.
    return _Raises(expected_exc_type, match, may_pass, err_msg)
877
+
878
+
879
+ class _Raises(contextlib.AbstractContextManager):
880
+ # see raises() for parameters
881
+ def __init__(self, expected_exc_type, match, may_pass, err_msg):
882
+ self.expected_exc_types = (
883
+ expected_exc_type
884
+ if isinstance(expected_exc_type, Iterable)
885
+ else [expected_exc_type]
886
+ )
887
+ self.matches = [match] if isinstance(match, str) else match
888
+ self.may_pass = may_pass
889
+ self.err_msg = err_msg
890
+ self.raised_and_matched = False
891
+
892
+ def __exit__(self, exc_type, exc_value, _):
893
+ # see
894
+ # https://docs.python.org/2.5/whatsnew/pep-343.html#SECTION000910000000000000000
895
+
896
+ if exc_type is None: # No exception was raised in the block
897
+ if self.may_pass:
898
+ return True # CM is happy
899
+ else:
900
+ err_msg = self.err_msg or f"Did not raise: {self.expected_exc_types}"
901
+ raise AssertionError(err_msg)
902
+
903
+ if not any(
904
+ issubclass(exc_type, expected_type)
905
+ for expected_type in self.expected_exc_types
906
+ ):
907
+ if self.err_msg is not None:
908
+ raise AssertionError(self.err_msg) from exc_value
909
+ else:
910
+ return False # will re-raise the original exception
911
+
912
+ if self.matches is not None:
913
+ err_msg = self.err_msg or (
914
+ "The error message should contain one of the following "
915
+ "patterns:\n{}\nGot {}".format("\n".join(self.matches), str(exc_value))
916
+ )
917
+ if not any(re.search(match, str(exc_value)) for match in self.matches):
918
+ raise AssertionError(err_msg) from exc_value
919
+ self.raised_and_matched = True
920
+
921
+ return True
922
+
923
+
924
class MinimalClassifier:
    """Most-frequent-class classifier, deliberately not a BaseEstimator.

    This estimator should be tested with:

    * `check_estimator` in `test_estimator_checks.py`;
    * within a `Pipeline` in `test_pipeline.py`;
    * within a `SearchCV` in `test_search.py`.
    """

    _estimator_type = "classifier"

    def __init__(self, param=None):
        self.param = param

    def get_params(self, deep=True):
        return {"param": self.param}

    def set_params(self, **params):
        for name, value in params.items():
            setattr(self, name, value)
        return self

    def fit(self, X, y):
        # Validate inputs, then memorize the majority class.
        X, y = check_X_y(X, y)
        check_classification_targets(y)
        self.classes_, class_counts = np.unique(y, return_counts=True)
        self._most_frequent_class_idx = class_counts.argmax()
        return self

    def predict_proba(self, X):
        check_is_fitted(self)
        X = check_array(X)
        # All probability mass on the majority class seen during fit.
        y_proba = np.zeros(shape=(X.shape[0], self.classes_.size), dtype=np.float64)
        y_proba[:, self._most_frequent_class_idx] = 1.0
        return y_proba

    def predict(self, X):
        proba = self.predict_proba(X)
        return self.classes_[proba.argmax(axis=1)]

    def score(self, X, y):
        from sklearn.metrics import accuracy_score

        return accuracy_score(y, self.predict(X))
971
+
972
+
973
class MinimalRegressor:
    """Mean-predicting regressor, deliberately not a BaseEstimator.

    This estimator should be tested with:

    * `check_estimator` in `test_estimator_checks.py`;
    * within a `Pipeline` in `test_pipeline.py`;
    * within a `SearchCV` in `test_search.py`.
    """

    _estimator_type = "regressor"

    def __init__(self, param=None):
        self.param = param

    def get_params(self, deep=True):
        return {"param": self.param}

    def set_params(self, **params):
        for name, value in params.items():
            setattr(self, name, value)
        return self

    def fit(self, X, y):
        # Validate inputs and memorize the target mean.
        X, y = check_X_y(X, y)
        self.is_fitted_ = True
        self._mean = np.mean(y)
        return self

    def predict(self, X):
        check_is_fitted(self)
        X = check_array(X)
        # Constant prediction: the mean of the training targets.
        return np.ones(shape=(X.shape[0],)) * self._mean

    def score(self, X, y):
        from sklearn.metrics import r2_score

        return r2_score(y, self.predict(X))
1011
+
1012
+
1013
class MinimalTransformer:
    """Identity transformer, deliberately not a BaseEstimator.

    This estimator should be tested with:

    * `check_estimator` in `test_estimator_checks.py`;
    * within a `Pipeline` in `test_pipeline.py`;
    * within a `SearchCV` in `test_search.py`.
    """

    def __init__(self, param=None):
        self.param = param

    def get_params(self, deep=True):
        return {"param": self.param}

    def set_params(self, **params):
        for name, value in params.items():
            setattr(self, name, value)
        return self

    def fit(self, X, y=None):
        # Validation only; nothing is learned.
        check_array(X)
        self.is_fitted_ = True
        return self

    def transform(self, X, y=None):
        check_is_fitted(self)
        X = check_array(X)
        # Identity transform: return the validated input unchanged.
        return X

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X, y)
1047
+
1048
+
1049
def _array_api_for_tests(array_namespace, device):
    # Return the array-API-compat namespace for ``array_namespace``, or raise
    # SkipTest when the module, array_api_compat, or the requested device is
    # unavailable in the current environment.
    try:
        if array_namespace == "numpy.array_api":
            # FIXME: once it is not experimental anymore
            with ignore_warnings(category=UserWarning):
                # UserWarning: numpy.array_api submodule is still experimental.
                array_mod = importlib.import_module(array_namespace)
        else:
            array_mod = importlib.import_module(array_namespace)
    except ModuleNotFoundError:
        raise SkipTest(
            f"{array_namespace} is not installed: not checking array_api input"
        )
    try:
        import array_api_compat  # noqa
    except ImportError:
        raise SkipTest(
            "array_api_compat is not installed: not checking array_api input"
        )

    # First create an array using the chosen array module and then get the
    # corresponding (compatibility wrapped) array namespace based on it.
    # This is because `cupy` is not the same as the compatibility wrapped
    # namespace of a CuPy array.
    xp = array_api_compat.get_namespace(array_mod.asarray(1))
    # Device-availability checks: skip rather than fail when the requested
    # accelerator backend is not usable on this machine.
    if (
        array_namespace == "torch"
        and device == "cuda"
        and not xp.backends.cuda.is_built()
    ):
        raise SkipTest("PyTorch test requires cuda, which is not available")
    elif array_namespace == "torch" and device == "mps":
        if os.getenv("PYTORCH_ENABLE_MPS_FALLBACK") != "1":
            # For now we need PYTORCH_ENABLE_MPS_FALLBACK=1 for all estimators to work
            # when using the MPS device.
            raise SkipTest(
                "Skipping MPS device test because PYTORCH_ENABLE_MPS_FALLBACK is not "
                "set."
            )
        if not xp.backends.mps.is_built():
            raise SkipTest(
                "MPS is not available because the current PyTorch install was not "
                "built with MPS enabled."
            )
    elif array_namespace in {"cupy", "cupy.array_api"}:  # pragma: nocover
        import cupy

        if cupy.cuda.runtime.getDeviceCount() == 0:
            raise SkipTest("CuPy test requires cuda, which is not available")
    return xp
1099
+
1100
+
1101
def _get_warnings_filters_info_list():
    """Return the list of warning-filter descriptions shared by the test setup.

    Each entry is a `WarningInfo` that can either be rendered as a pytest
    ``filterwarnings`` line (`to_filterwarning_str`) or applied through
    `warnings.filterwarnings` (see `turn_warnings_into_errors`).
    """

    @dataclass
    class WarningInfo:
        # A `warnings` filter action, e.g. "error" or "ignore".
        action: "warnings._ActionKind"
        message: str = ""
        category: type[Warning] = Warning

        def to_filterwarning_str(self):
            # pytest's filterwarnings syntax expects a fully qualified
            # category name, except for builtin warning classes.
            if self.category.__module__ == "builtins":
                category = self.category.__name__
            else:
                category = f"{self.category.__module__}.{self.category.__name__}"

            return f"{self.action}:{self.message}:{category}"

    return [
        WarningInfo("error", category=DeprecationWarning),
        WarningInfo("error", category=FutureWarning),
        WarningInfo("error", category=VisibleDeprecationWarning),
        # TODO: remove when pyamg > 5.0.1
        # Avoid a deprecation warning due pkg_resources usage in pyamg.
        WarningInfo(
            "ignore",
            message="pkg_resources is deprecated as an API",
            category=DeprecationWarning,
        ),
        WarningInfo(
            "ignore",
            message="Deprecated call to `pkg_resources",
            category=DeprecationWarning,
        ),
        # pytest-cov issue https://github.com/pytest-dev/pytest-cov/issues/557 not
        # fixed although it has been closed. https://github.com/pytest-dev/pytest-cov/pull/623
        # would probably fix it.
        WarningInfo(
            "ignore",
            message=(
                "The --rsyncdir command line argument and rsyncdirs config variable are"
                " deprecated"
            ),
            category=DeprecationWarning,
        ),
        # XXX: Easiest way to ignore pandas Pyarrow DeprecationWarning in the
        # short-term. See https://github.com/pandas-dev/pandas/issues/54466 for
        # more details.
        WarningInfo(
            "ignore",
            message=r"\s*Pyarrow will become a required dependency",
            category=DeprecationWarning,
        ),
    ]
1152
+
1153
+
1154
def get_pytest_filterwarning_lines():
    """Render the shared warning filters as pytest ``filterwarnings`` lines."""
    return [
        info.to_filterwarning_str() for info in _get_warnings_filters_info_list()
    ]
1160
+
1161
+
1162
def turn_warnings_into_errors():
    """Install the shared warning filters through `warnings.filterwarnings`.

    Applies the same filter list that `get_pytest_filterwarning_lines`
    renders for pytest, turning selected warnings into errors.
    """
    for info in _get_warnings_filters_info_list():
        warnings.filterwarnings(
            info.action, message=info.message, category=info.category
        )
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (208 kB). View file
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/deprecation.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import warnings
3
+
4
+ __all__ = ["deprecated"]
5
+
6
+
7
class deprecated:
    """Decorator marking a function, class, or property as deprecated.

    Using the decorated object (calling the function, instantiating the
    class, or reading the property) emits a ``FutureWarning``; the optional
    ``extra`` text is appended to the warning message. To use the default
    message, call the decorator with empty parentheses:

    Examples
    --------
    >>> from sklearn.utils import deprecated
    >>> deprecated()
    <sklearn.utils.deprecation.deprecated object at ...>
    >>> @deprecated()
    ... def some_function(): pass

    Parameters
    ----------
    extra : str, default=''
        To be added to the deprecation messages.
    """

    # Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.

    def __init__(self, extra=""):
        self.extra = extra

    def __call__(self, obj):
        """Dispatch on the kind of the decorated object.

        Parameters
        ----------
        obj : object
            The function, class, or ``property`` being decorated.
        """
        if isinstance(obj, property):
            # Only triggered properly when `@property` sits *below*
            # `@deprecated`, i.e.:
            #
            #     @deprecated(msg)
            #     @property
            #     def deprecated_attribute_(self):
            #         ...
            return self._decorate_property(obj)
        if isinstance(obj, type):
            return self._decorate_class(obj)
        return self._decorate_fun(obj)

    def _decorate_class(self, cls):
        """Wrap ``cls.__new__`` so instantiation warns."""
        message = f"Class {cls.__name__} is deprecated"
        if self.extra:
            message += f"; {self.extra}"

        original_new = cls.__new__

        def wrapped(cls, *args, **kwargs):
            warnings.warn(message, category=FutureWarning)
            # object.__new__ rejects extra positional/keyword arguments.
            if original_new is object.__new__:
                return object.__new__(cls)
            return original_new(cls, *args, **kwargs)

        cls.__new__ = wrapped

        wrapped.__name__ = "__new__"
        # Keep a handle on the original __new__ for introspection.
        wrapped.deprecated_original = original_new

        return cls

    def _decorate_fun(self, fun):
        """Decorate function fun"""

        message = f"Function {fun.__name__} is deprecated"
        if self.extra:
            message += f"; {self.extra}"

        @functools.wraps(fun)
        def wrapped(*args, **kwargs):
            warnings.warn(message, category=FutureWarning)
            return fun(*args, **kwargs)

        # Add a reference to the wrapped function so that we can introspect
        # on function arguments in Python 2 (already works in Python 3)
        wrapped.__wrapped__ = fun

        return wrapped

    def _decorate_property(self, prop):
        """Wrap a ``property`` so reading it warns with `self.extra`."""
        message = self.extra

        @property
        @functools.wraps(prop)
        def wrapped(*args, **kwargs):
            warnings.warn(message, category=FutureWarning)
            return prop.fget(*args, **kwargs)

        return wrapped
106
+
107
+
108
def _is_deprecated(func):
    """Helper to check if func is wrapped by our deprecated decorator.

    Heuristic: the decorator's wrapper closes over the deprecation message,
    so we look for the word "deprecated" among the string cells of the
    function's closure.
    """
    cells = getattr(func, "__closure__", None) or []
    strings = [c.cell_contents for c in cells if isinstance(c.cell_contents, str)]
    return "deprecated" in "".join(strings)
env-llmeval/lib/python3.10/site-packages/sklearn/utils/discovery.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The :mod:`sklearn.utils.discovery` module includes utilities to discover
3
+ objects (i.e. estimators, displays, functions) from the `sklearn` package.
4
+ """
5
+
6
+ import inspect
7
+ import pkgutil
8
+ from importlib import import_module
9
+ from operator import itemgetter
10
+ from pathlib import Path
11
+
12
+ _MODULE_TO_IGNORE = {
13
+ "tests",
14
+ "externals",
15
+ "setup",
16
+ "conftest",
17
+ "experimental",
18
+ "estimator_checks",
19
+ }
20
+
21
+
22
def all_estimators(type_filter=None):
    """Get a list of all estimators from `sklearn`.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.

    Parameters
    ----------
    type_filter : {"classifier", "regressor", "cluster", "transformer"} \
            or list of such str, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned. Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.

    Examples
    --------
    >>> from sklearn.utils.discovery import all_estimators
    >>> estimators = all_estimators()
    >>> type(estimators)
    <class 'list'>
    >>> type(estimators[0])
    <class 'tuple'>
    >>> estimators[:2]
    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
     ('AdaBoostClassifier',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
    >>> classifiers = all_estimators(type_filter="classifier")
    >>> classifiers[:2]
    [('AdaBoostClassifier',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
     ('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
    >>> regressors = all_estimators(type_filter="regressor")
    >>> regressors[:2]
    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
     ('AdaBoostRegressor',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
    >>> both = all_estimators(type_filter=["classifier", "regressor"])
    >>> both[:2]
    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
     ('AdaBoostClassifier',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
    """
    # lazy import to avoid circular imports from sklearn.base
    from ..base import (
        BaseEstimator,
        ClassifierMixin,
        ClusterMixin,
        RegressorMixin,
        TransformerMixin,
    )
    from . import IS_PYPY
    from ._testing import ignore_warnings

    def is_abstract(c):
        # A class is abstract when ABCMeta left unimplemented abstract
        # methods in ``__abstractmethods__``.
        if not (hasattr(c, "__abstractmethods__")):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    root = str(Path(__file__).parent.parent)  # sklearn package
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            module_parts = module_name.split(".")
            if (
                any(part in _MODULE_TO_IGNORE for part in module_parts)
                or "._" in module_name
            ):
                # Skip private modules and the ignore-list (tests, externals,
                # experimental, ...).
                continue
            module = import_module(module_name)
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [
                (name, est_cls) for name, est_cls in classes if not name.startswith("_")
            ]

            # TODO: Remove when FeatureHasher is implemented in PYPY
            # Skips FeatureHasher for PYPY
            if IS_PYPY and "feature_extraction" in module_name:
                classes = [
                    (name, est_cls)
                    for name, est_cls in classes
                    if name == "FeatureHasher"
                ]

            all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [
        c
        for c in all_classes
        if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {
            "classifier": ClassifierMixin,
            "regressor": RegressorMixin,
            "transformer": TransformerMixin,
            "cluster": ClusterMixin,
        }
        for name, mixin in filters.items():
            if name in type_filter:
                # Remove recognised names; anything left afterwards is
                # reported as invalid below.
                type_filter.remove(name)
                filtered_estimators.extend(
                    [est for est in estimators if issubclass(est[1], mixin)]
                )
        estimators = filtered_estimators
        if type_filter:
            raise ValueError(
                "Parameter type_filter must be 'classifier', "
                "'regressor', 'transformer', 'cluster' or "
                "None, got"
                f" {repr(type_filter)}."
            )

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
161
+
162
+
163
def all_displays():
    """Get a list of all displays from `sklearn`.

    Returns
    -------
    displays : list of tuples
        List of (name, class), where ``name`` is the display class name as
        string and ``class`` is the actual type of the class.

    Examples
    --------
    >>> from sklearn.utils.discovery import all_displays
    >>> displays = all_displays()
    >>> displays[0]
    ('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
    """
    # lazy import to avoid circular imports from sklearn.base
    from ._testing import ignore_warnings

    discovered = []
    root = str(Path(__file__).parent.parent)  # sklearn package
    # Importing while walking packages may raise deprecation warnings;
    # silence them during the crawl.
    with ignore_warnings(category=FutureWarning):
        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            parts = module_name.split(".")
            if "._" in module_name or any(p in _MODULE_TO_IGNORE for p in parts):
                continue
            module = import_module(module_name)
            for name, klass in inspect.getmembers(module, inspect.isclass):
                # Displays follow the public `*Display` naming convention.
                if not name.startswith("_") and name.endswith("Display"):
                    discovered.append((name, klass))

    return sorted(set(discovered), key=itemgetter(0))
204
+
205
+
206
def _is_checked_function(item):
    """Return True for public sklearn functions outside `estimator_checks`."""
    if not inspect.isfunction(item):
        return False
    if item.__name__.startswith("_"):
        return False
    module = item.__module__
    return module.startswith("sklearn.") and not module.endswith("estimator_checks")
218
+
219
+
220
def all_functions():
    """Get a list of all functions from `sklearn`.

    Returns
    -------
    functions : list of tuples
        List of (name, function), where ``name`` is the function name as
        string and ``function`` is the actual function.

    Examples
    --------
    >>> from sklearn.utils.discovery import all_functions
    >>> functions = all_functions()
    >>> name, function = functions[0]
    >>> name
    'accuracy_score'
    """
    # lazy import to avoid circular imports from sklearn.base
    from ._testing import ignore_warnings

    found = []
    root = str(Path(__file__).parent.parent)  # sklearn package
    # Importing while walking packages may raise deprecation warnings;
    # silence them during the crawl.
    with ignore_warnings(category=FutureWarning):
        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            parts = module_name.split(".")
            if "._" in module_name or any(p in _MODULE_TO_IGNORE for p in parts):
                continue

            module = import_module(module_name)
            for name, func in inspect.getmembers(module, _is_checked_function):
                if not name.startswith("_"):
                    found.append((func.__name__, func))

    # drop duplicates, sort for reproducibility; itemgetter keeps the sort
    # keyed on the name only, never comparing the function objects.
    return sorted(set(found), key=itemgetter(0))