applied-ai-018 committed
Commit b0b566c · verified · Parent(s): fb5a1b4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/_libs/__init__.py +27 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/_libs/__pycache__/__init__.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/_libs/algos.pyi +416 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.pyi +40 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/_libs/groupby.pyi +216 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.pyi +9 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.pyi +94 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.pyi +79 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.pyi +23 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so +0 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.pyi +213 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.pyi +16 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.cpython-310-x86_64-linux-gnu.so +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_parser.cpython-310-x86_64-linux-gnu.so +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.cpython-310-x86_64-linux-gnu.so +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.pyi +27 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.pyi +16 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.cpython-310-x86_64-linux-gnu.so +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.pyi +7 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so +0 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.pyi +51 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.cpython-310-x86_64-linux-gnu.so +0 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.pyi +12 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so +0 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.pyi +20 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__init__.py +638 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py +93 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/_testing/_io.py +170 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/_testing/_warnings.py +232 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/_testing/asserters.py +1435 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/_testing/compat.py +29 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/_testing/contexts.py +257 -0
env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.96 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc ADDED
Binary file (11.6 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc ADDED
Binary file (14.5 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc ADDED
Binary file (263 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (45.8 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc ADDED
Binary file (419 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/__init__.py ADDED
@@ -0,0 +1,27 @@
+__all__ = [
+    "NaT",
+    "NaTType",
+    "OutOfBoundsDatetime",
+    "Period",
+    "Timedelta",
+    "Timestamp",
+    "iNaT",
+    "Interval",
+]
+
+
+# Below imports needs to happen first to ensure pandas top level
+# module gets monkeypatched with the pandas_datetime_CAPI
+# see pandas_datetime_exec in pd_datetime.c
+import pandas._libs.pandas_parser  # isort: skip # type: ignore[reportUnusedImport]
+import pandas._libs.pandas_datetime  # noqa: F401 # isort: skip # type: ignore[reportUnusedImport]
+from pandas._libs.interval import Interval
+from pandas._libs.tslibs import (
+    NaT,
+    NaTType,
+    OutOfBoundsDatetime,
+    Period,
+    Timedelta,
+    Timestamp,
+    iNaT,
+)
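For orientation (not part of the commit): the names this module re-exports are the C-extension scalars pandas uses internally, and they behave like their public pandas counterparts. A minimal sketch, assuming a pandas 2.x install:

from pandas._libs import NaT, Timedelta, Timestamp, iNaT

ts = Timestamp("2024-01-01")
print(ts + Timedelta("1D"))  # Timestamp('2024-01-02 00:00:00')
print(NaT)                   # NaT, the datetime-like missing-value scalar
print(iNaT)                  # -9223372036854775808, the int64 sentinel behind NaT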
env-llmeval/lib/python3.10/site-packages/pandas/_libs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (545 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/algos.pyi ADDED
@@ -0,0 +1,416 @@
+from typing import Any
+
+import numpy as np
+
+from pandas._typing import npt
+
+class Infinity:
+    def __eq__(self, other) -> bool: ...
+    def __ne__(self, other) -> bool: ...
+    def __lt__(self, other) -> bool: ...
+    def __le__(self, other) -> bool: ...
+    def __gt__(self, other) -> bool: ...
+    def __ge__(self, other) -> bool: ...
+
+class NegInfinity:
+    def __eq__(self, other) -> bool: ...
+    def __ne__(self, other) -> bool: ...
+    def __lt__(self, other) -> bool: ...
+    def __le__(self, other) -> bool: ...
+    def __gt__(self, other) -> bool: ...
+    def __ge__(self, other) -> bool: ...
+
+def unique_deltas(
+    arr: np.ndarray,  # const int64_t[:]
+) -> np.ndarray: ...  # np.ndarray[np.int64, ndim=1]
+def is_lexsorted(list_of_arrays: list[npt.NDArray[np.int64]]) -> bool: ...
+def groupsort_indexer(
+    index: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+) -> tuple[
+    np.ndarray,  # ndarray[int64_t, ndim=1]
+    np.ndarray,  # ndarray[int64_t, ndim=1]
+]: ...
+def kth_smallest(
+    arr: np.ndarray,  # numeric[:]
+    k: int,
+) -> Any: ...  # numeric
+
+# ----------------------------------------------------------------------
+# Pairwise correlation/covariance
+
+def nancorr(
+    mat: npt.NDArray[np.float64],  # const float64_t[:, :]
+    cov: bool = ...,
+    minp: int | None = ...,
+) -> npt.NDArray[np.float64]: ...  # ndarray[float64_t, ndim=2]
+def nancorr_spearman(
+    mat: npt.NDArray[np.float64],  # ndarray[float64_t, ndim=2]
+    minp: int = ...,
+) -> npt.NDArray[np.float64]: ...  # ndarray[float64_t, ndim=2]
+
+# ----------------------------------------------------------------------
+
+def validate_limit(nobs: int | None, limit=...) -> int: ...
+def get_fill_indexer(
+    mask: npt.NDArray[np.bool_],
+    limit: int | None = None,
+) -> npt.NDArray[np.intp]: ...
+def pad(
+    old: np.ndarray,  # ndarray[numeric_object_t]
+    new: np.ndarray,  # ndarray[numeric_object_t]
+    limit=...,
+) -> npt.NDArray[np.intp]: ...  # np.ndarray[np.intp, ndim=1]
+def pad_inplace(
+    values: np.ndarray,  # numeric_object_t[:]
+    mask: np.ndarray,  # uint8_t[:]
+    limit=...,
+) -> None: ...
+def pad_2d_inplace(
+    values: np.ndarray,  # numeric_object_t[:, :]
+    mask: np.ndarray,  # const uint8_t[:, :]
+    limit=...,
+) -> None: ...
+def backfill(
+    old: np.ndarray,  # ndarray[numeric_object_t]
+    new: np.ndarray,  # ndarray[numeric_object_t]
+    limit=...,
+) -> npt.NDArray[np.intp]: ...  # np.ndarray[np.intp, ndim=1]
+def backfill_inplace(
+    values: np.ndarray,  # numeric_object_t[:]
+    mask: np.ndarray,  # uint8_t[:]
+    limit=...,
+) -> None: ...
+def backfill_2d_inplace(
+    values: np.ndarray,  # numeric_object_t[:, :]
+    mask: np.ndarray,  # const uint8_t[:, :]
+    limit=...,
+) -> None: ...
+def is_monotonic(
+    arr: np.ndarray,  # ndarray[numeric_object_t, ndim=1]
+    timelike: bool,
+) -> tuple[bool, bool, bool]: ...
+
+# ----------------------------------------------------------------------
+# rank_1d, rank_2d
+# ----------------------------------------------------------------------
+
+def rank_1d(
+    values: np.ndarray,  # ndarray[numeric_object_t, ndim=1]
+    labels: np.ndarray | None = ...,  # const int64_t[:]=None
+    is_datetimelike: bool = ...,
+    ties_method=...,
+    ascending: bool = ...,
+    pct: bool = ...,
+    na_option=...,
+    mask: npt.NDArray[np.bool_] | None = ...,
+) -> np.ndarray: ...  # np.ndarray[float64_t, ndim=1]
+def rank_2d(
+    in_arr: np.ndarray,  # ndarray[numeric_object_t, ndim=2]
+    axis: int = ...,
+    is_datetimelike: bool = ...,
+    ties_method=...,
+    ascending: bool = ...,
+    na_option=...,
+    pct: bool = ...,
+) -> np.ndarray: ...  # np.ndarray[float64_t, ndim=1]
+def diff_2d(
+    arr: np.ndarray,  # ndarray[diff_t, ndim=2]
+    out: np.ndarray,  # ndarray[out_t, ndim=2]
+    periods: int,
+    axis: int,
+    datetimelike: bool = ...,
+) -> None: ...
+def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ...
+def ensure_object(arr: object) -> npt.NDArray[np.object_]: ...
+def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ...
+def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ...
+def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ...
+def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ...
+def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ...
+def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ...
+def take_1d_int8_int8(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int8_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int8_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int8_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_int16(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int32_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int32_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int32_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int64_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int64_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_float32_float32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_float32_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_float64_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_object_object(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_bool_bool(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_bool_object(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_int8(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_int16(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int32_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int32_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int32_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int64_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int64_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_float32_float32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_float32_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_float64_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_object_object(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_bool_bool(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_bool_object(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_int8(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_int16(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int32_int32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int32_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int32_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int64_int64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int64_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_float32_float32(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_float32_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_float64_float64(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_object_object(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_bool_bool(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_bool_object(
+    values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_multi_int8_int8(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int8_int32(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int8_int64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int8_float64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_int16(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_int32(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_int64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_float64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int32_int32(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int32_int64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int32_float64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int64_float64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_float32_float32(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_float32_float64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_float64_float64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_object_object(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_bool_bool(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_bool_object(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
+def take_2d_multi_int64_int64(
+    values: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    out: np.ndarray,
+    fill_value=...,
+) -> None: ...
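As a hedged illustration of two of the kernels typed above (pandas._libs.algos is a private API; behavior is sketched from the stub signatures and not guaranteed stable across versions):

import numpy as np
from pandas._libs import algos

# is_lexsorted: are these key arrays lexicographically sorted?
keys = [np.array([0, 0, 1, 1], dtype=np.int64),
        np.array([0, 1, 0, 1], dtype=np.int64)]
print(algos.is_lexsorted(keys))  # True

# pad: forward-fill indexer mapping a sorted `new` onto a sorted `old`
old = np.array([0, 5, 10], dtype=np.int64)
new = np.array([0, 3, 5, 7, 12], dtype=np.int64)
print(algos.pad(old, new))  # expected [0 0 1 1 2]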
env-llmeval/lib/python3.10/site-packages/pandas/_libs/arrays.pyi ADDED
@@ -0,0 +1,40 @@
+from typing import Sequence
+
+import numpy as np
+
+from pandas._typing import (
+    AxisInt,
+    DtypeObj,
+    Self,
+    Shape,
+)
+
+class NDArrayBacked:
+    _dtype: DtypeObj
+    _ndarray: np.ndarray
+    def __init__(self, values: np.ndarray, dtype: DtypeObj) -> None: ...
+    @classmethod
+    def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ...
+    def _from_backing_data(self, values: np.ndarray): ...
+    def __setstate__(self, state): ...
+    def __len__(self) -> int: ...
+    @property
+    def shape(self) -> Shape: ...
+    @property
+    def ndim(self) -> int: ...
+    @property
+    def size(self) -> int: ...
+    @property
+    def nbytes(self) -> int: ...
+    def copy(self, order=...): ...
+    def delete(self, loc, axis=...): ...
+    def swapaxes(self, axis1, axis2): ...
+    def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ...
+    def reshape(self, *args, **kwargs): ...
+    def ravel(self, order=...): ...
+    @property
+    def T(self): ...
+    @classmethod
+    def _concat_same_type(
+        cls, to_concat: Sequence[Self], axis: AxisInt = ...
+    ) -> Self: ...
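NDArrayBacked is the Cython base class behind pandas' ndarray-backed extension arrays (DatetimeArray, TimedeltaArray, and others). A small check, assuming a pandas 2.x install:

import numpy as np
from pandas._libs.arrays import NDArrayBacked
from pandas.core.arrays import DatetimeArray

arr = DatetimeArray._from_sequence(np.array(["2024-01-01"], dtype="M8[ns]"))
print(isinstance(arr, NDArrayBacked))  # True: shape/ndim/T come from this base
print(arr.ndim, arr.nbytes)            # 1 8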
env-llmeval/lib/python3.10/site-packages/pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (61.7 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/groupby.pyi ADDED
@@ -0,0 +1,216 @@
+from typing import Literal
+
+import numpy as np
+
+from pandas._typing import npt
+
+def group_median_float64(
+    out: np.ndarray,  # ndarray[float64_t, ndim=2]
+    counts: npt.NDArray[np.int64],
+    values: np.ndarray,  # ndarray[float64_t, ndim=2]
+    labels: npt.NDArray[np.int64],
+    min_count: int = ...,  # Py_ssize_t
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_cumprod(
+    out: np.ndarray,  # float64_t[:, ::1]
+    values: np.ndarray,  # const float64_t[:, :]
+    labels: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+    is_datetimelike: bool,
+    skipna: bool = ...,
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_cumsum(
+    out: np.ndarray,  # int64float_t[:, ::1]
+    values: np.ndarray,  # ndarray[int64float_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+    is_datetimelike: bool,
+    skipna: bool = ...,
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_shift_indexer(
+    out: np.ndarray,  # int64_t[::1]
+    labels: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+    periods: int,
+) -> None: ...
+def group_fillna_indexer(
+    out: np.ndarray,  # ndarray[intp_t]
+    labels: np.ndarray,  # ndarray[int64_t]
+    sorted_labels: npt.NDArray[np.intp],
+    mask: npt.NDArray[np.uint8],
+    limit: int,  # int64_t
+    dropna: bool,
+) -> None: ...
+def group_any_all(
+    out: np.ndarray,  # uint8_t[::1]
+    values: np.ndarray,  # const uint8_t[::1]
+    labels: np.ndarray,  # const int64_t[:]
+    mask: np.ndarray,  # const uint8_t[::1]
+    val_test: Literal["any", "all"],
+    skipna: bool,
+    result_mask: np.ndarray | None,
+) -> None: ...
+def group_sum(
+    out: np.ndarray,  # complexfloatingintuint_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[complexfloatingintuint_t, ndim=2]
+    labels: np.ndarray,  # const intp_t[:]
+    mask: np.ndarray | None,
+    result_mask: np.ndarray | None = ...,
+    min_count: int = ...,
+    is_datetimelike: bool = ...,
+) -> None: ...
+def group_prod(
+    out: np.ndarray,  # int64float_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[int64float_t, ndim=2]
+    labels: np.ndarray,  # const intp_t[:]
+    mask: np.ndarray | None,
+    result_mask: np.ndarray | None = ...,
+    min_count: int = ...,
+) -> None: ...
+def group_var(
+    out: np.ndarray,  # floating[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[floating, ndim=2]
+    labels: np.ndarray,  # const intp_t[:]
+    min_count: int = ...,  # Py_ssize_t
+    ddof: int = ...,  # int64_t
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+    is_datetimelike: bool = ...,
+    name: str = ...,
+) -> None: ...
+def group_skew(
+    out: np.ndarray,  # float64_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[float64_T, ndim=2]
+    labels: np.ndarray,  # const intp_t[::1]
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+    skipna: bool = ...,
+) -> None: ...
+def group_mean(
+    out: np.ndarray,  # floating[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[floating, ndim=2]
+    labels: np.ndarray,  # const intp_t[:]
+    min_count: int = ...,  # Py_ssize_t
+    is_datetimelike: bool = ...,  # bint
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_ohlc(
+    out: np.ndarray,  # floatingintuint_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[floatingintuint_t, ndim=2]
+    labels: np.ndarray,  # const intp_t[:]
+    min_count: int = ...,
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_quantile(
+    out: npt.NDArray[np.float64],
+    values: np.ndarray,  # ndarray[numeric, ndim=1]
+    labels: npt.NDArray[np.intp],
+    mask: npt.NDArray[np.uint8],
+    qs: npt.NDArray[np.float64],  # const
+    starts: npt.NDArray[np.int64],
+    ends: npt.NDArray[np.int64],
+    interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"],
+    result_mask: np.ndarray | None,
+    is_datetimelike: bool,
+) -> None: ...
+def group_last(
+    out: np.ndarray,  # rank_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[rank_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    mask: npt.NDArray[np.bool_] | None,
+    result_mask: npt.NDArray[np.bool_] | None = ...,
+    min_count: int = ...,  # Py_ssize_t
+    is_datetimelike: bool = ...,
+    skipna: bool = ...,
+) -> None: ...
+def group_nth(
+    out: np.ndarray,  # rank_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[rank_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    mask: npt.NDArray[np.bool_] | None,
+    result_mask: npt.NDArray[np.bool_] | None = ...,
+    min_count: int = ...,  # int64_t
+    rank: int = ...,  # int64_t
+    is_datetimelike: bool = ...,
+    skipna: bool = ...,
+) -> None: ...
+def group_rank(
+    out: np.ndarray,  # float64_t[:, ::1]
+    values: np.ndarray,  # ndarray[rank_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+    is_datetimelike: bool,
+    ties_method: Literal["average", "min", "max", "first", "dense"] = ...,
+    ascending: bool = ...,
+    pct: bool = ...,
+    na_option: Literal["keep", "top", "bottom"] = ...,
+    mask: npt.NDArray[np.bool_] | None = ...,
+) -> None: ...
+def group_max(
+    out: np.ndarray,  # groupby_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    min_count: int = ...,
+    is_datetimelike: bool = ...,
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_min(
+    out: np.ndarray,  # groupby_t[:, ::1]
+    counts: np.ndarray,  # int64_t[::1]
+    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    min_count: int = ...,
+    is_datetimelike: bool = ...,
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_idxmin_idxmax(
+    out: npt.NDArray[np.intp],
+    counts: npt.NDArray[np.int64],
+    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
+    labels: npt.NDArray[np.intp],
+    min_count: int = ...,
+    is_datetimelike: bool = ...,
+    mask: np.ndarray | None = ...,
+    name: str = ...,
+    skipna: bool = ...,
+    result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_cummin(
+    out: np.ndarray,  # groupby_t[:, ::1]
+    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+    is_datetimelike: bool,
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+    skipna: bool = ...,
+) -> None: ...
+def group_cummax(
+    out: np.ndarray,  # groupby_t[:, ::1]
+    values: np.ndarray,  # ndarray[groupby_t, ndim=2]
+    labels: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+    is_datetimelike: bool,
+    mask: np.ndarray | None = ...,
+    result_mask: np.ndarray | None = ...,
+    skipna: bool = ...,
+) -> None: ...
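These kernels mutate preallocated output buffers rather than returning values. A minimal, hedged sketch of driving group_sum directly (normally GroupBy does this; argument dtypes follow the comments in the stub, and details may vary across pandas versions):

import numpy as np
from pandas._libs import groupby as libgroupby

values = np.array([[1.0], [2.0], [3.0], [4.0]])  # ndim=2: rows x columns
labels = np.array([0, 0, 1, 1], dtype=np.intp)   # group id per row
ngroups = 2
out = np.zeros((ngroups, 1), dtype=np.float64)
counts = np.zeros(ngroups, dtype=np.int64)
libgroupby.group_sum(out, counts, values, labels, mask=None)
print(out.ravel())  # expected [3. 6.]
print(counts)       # expected [2 2]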
env-llmeval/lib/python3.10/site-packages/pandas/_libs/hashing.pyi ADDED
@@ -0,0 +1,9 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def hash_object_array(
+    arr: npt.NDArray[np.object_],
+    key: str,
+    encoding: str = ...,
+) -> npt.NDArray[np.uint64]: ...
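hash_object_array is the primitive behind pandas.util.hash_pandas_object for object dtype. The key must be a 16-character string; the value below is pandas' default hash key:

import numpy as np
from pandas._libs.hashing import hash_object_array

arr = np.array(["a", "b", "c"], dtype=object)
print(hash_object_array(arr, "0123456789123456"))  # three deterministic uint64 hashes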
env-llmeval/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (988 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/internals.pyi ADDED
@@ -0,0 +1,94 @@
+from typing import (
+    Iterator,
+    Sequence,
+    final,
+    overload,
+)
+import weakref
+
+import numpy as np
+
+from pandas._typing import (
+    ArrayLike,
+    Self,
+    npt,
+)
+
+from pandas import Index
+from pandas.core.internals.blocks import Block as B
+
+def slice_len(slc: slice, objlen: int = ...) -> int: ...
+def get_concat_blkno_indexers(
+    blknos_list: list[npt.NDArray[np.intp]],
+) -> list[tuple[npt.NDArray[np.intp], BlockPlacement]]: ...
+def get_blkno_indexers(
+    blknos: np.ndarray,  # int64_t[:]
+    group: bool = ...,
+) -> list[tuple[int, slice | np.ndarray]]: ...
+def get_blkno_placements(
+    blknos: np.ndarray,
+    group: bool = ...,
+) -> Iterator[tuple[int, BlockPlacement]]: ...
+def update_blklocs_and_blknos(
+    blklocs: npt.NDArray[np.intp],
+    blknos: npt.NDArray[np.intp],
+    loc: int,
+    nblocks: int,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+@final
+class BlockPlacement:
+    def __init__(self, val: int | slice | np.ndarray) -> None: ...
+    @property
+    def indexer(self) -> np.ndarray | slice: ...
+    @property
+    def as_array(self) -> np.ndarray: ...
+    @property
+    def as_slice(self) -> slice: ...
+    @property
+    def is_slice_like(self) -> bool: ...
+    @overload
+    def __getitem__(
+        self, loc: slice | Sequence[int] | npt.NDArray[np.intp]
+    ) -> BlockPlacement: ...
+    @overload
+    def __getitem__(self, loc: int) -> int: ...
+    def __iter__(self) -> Iterator[int]: ...
+    def __len__(self) -> int: ...
+    def delete(self, loc) -> BlockPlacement: ...
+    def add(self, other) -> BlockPlacement: ...
+    def append(self, others: list[BlockPlacement]) -> BlockPlacement: ...
+    def tile_for_unstack(self, factor: int) -> npt.NDArray[np.intp]: ...
+
+class Block:
+    _mgr_locs: BlockPlacement
+    ndim: int
+    values: ArrayLike
+    refs: BlockValuesRefs
+    def __init__(
+        self,
+        values: ArrayLike,
+        placement: BlockPlacement,
+        ndim: int,
+        refs: BlockValuesRefs | None = ...,
+    ) -> None: ...
+    def slice_block_rows(self, slicer: slice) -> Self: ...
+
+class BlockManager:
+    blocks: tuple[B, ...]
+    axes: list[Index]
+    _known_consolidated: bool
+    _is_consolidated: bool
+    _blknos: np.ndarray
+    _blklocs: np.ndarray
+    def __init__(
+        self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=...
+    ) -> None: ...
+    def get_slice(self, slobj: slice, axis: int = ...) -> Self: ...
+    def _rebuild_blknos_and_blklocs(self) -> None: ...
+
+class BlockValuesRefs:
+    referenced_blocks: list[weakref.ref]
+    def __init__(self, blk: Block | None = ...) -> None: ...
+    def add_reference(self, blk: Block) -> None: ...
+    def add_index_reference(self, index: Index) -> None: ...
+    def has_reference(self) -> bool: ...
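BlockPlacement records which manager rows a Block occupies, normalizing slices and integer arrays behind one interface. A quick look, assuming a pandas 2.x install:

from pandas._libs.internals import BlockPlacement

bp = BlockPlacement(slice(0, 4))
print(bp.as_array)       # [0 1 2 3]
print(bp.is_slice_like)  # True
print(len(bp))           # 4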
env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.pyi ADDED
@@ -0,0 +1,79 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def inner_join(
+    left: np.ndarray,  # const intp_t[:]
+    right: np.ndarray,  # const intp_t[:]
+    max_groups: int,
+    sort: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def left_outer_join(
+    left: np.ndarray,  # const intp_t[:]
+    right: np.ndarray,  # const intp_t[:]
+    max_groups: int,
+    sort: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def full_outer_join(
+    left: np.ndarray,  # const intp_t[:]
+    right: np.ndarray,  # const intp_t[:]
+    max_groups: int,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def ffill_indexer(
+    indexer: np.ndarray,  # const intp_t[:]
+) -> npt.NDArray[np.intp]: ...
+def left_join_indexer_unique(
+    left: np.ndarray,  # ndarray[join_t]
+    right: np.ndarray,  # ndarray[join_t]
+) -> npt.NDArray[np.intp]: ...
+def left_join_indexer(
+    left: np.ndarray,  # ndarray[join_t]
+    right: np.ndarray,  # ndarray[join_t]
+) -> tuple[
+    np.ndarray,  # np.ndarray[join_t]
+    npt.NDArray[np.intp],
+    npt.NDArray[np.intp],
+]: ...
+def inner_join_indexer(
+    left: np.ndarray,  # ndarray[join_t]
+    right: np.ndarray,  # ndarray[join_t]
+) -> tuple[
+    np.ndarray,  # np.ndarray[join_t]
+    npt.NDArray[np.intp],
+    npt.NDArray[np.intp],
+]: ...
+def outer_join_indexer(
+    left: np.ndarray,  # ndarray[join_t]
+    right: np.ndarray,  # ndarray[join_t]
+) -> tuple[
+    np.ndarray,  # np.ndarray[join_t]
+    npt.NDArray[np.intp],
+    npt.NDArray[np.intp],
+]: ...
+def asof_join_backward_on_X_by_Y(
+    left_values: np.ndarray,  # ndarray[numeric_t]
+    right_values: np.ndarray,  # ndarray[numeric_t]
+    left_by_values: np.ndarray,  # const int64_t[:]
+    right_by_values: np.ndarray,  # const int64_t[:]
+    allow_exact_matches: bool = ...,
+    tolerance: np.number | float | None = ...,
+    use_hashtable: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def asof_join_forward_on_X_by_Y(
+    left_values: np.ndarray,  # ndarray[numeric_t]
+    right_values: np.ndarray,  # ndarray[numeric_t]
+    left_by_values: np.ndarray,  # const int64_t[:]
+    right_by_values: np.ndarray,  # const int64_t[:]
+    allow_exact_matches: bool = ...,
+    tolerance: np.number | float | None = ...,
+    use_hashtable: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def asof_join_nearest_on_X_by_Y(
+    left_values: np.ndarray,  # ndarray[numeric_t]
+    right_values: np.ndarray,  # ndarray[numeric_t]
+    left_by_values: np.ndarray,  # const int64_t[:]
+    right_by_values: np.ndarray,  # const int64_t[:]
+    allow_exact_matches: bool = ...,
+    tolerance: np.number | float | None = ...,
+    use_hashtable: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
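The *_join_indexer functions take already-factorized, sorted keys and return positional indexers, with -1 marking a missing match. A hedged sketch:

import numpy as np
from pandas._libs import join as libjoin

left = np.array([0, 1, 2], dtype=np.int64)
right = np.array([1, 2, 3], dtype=np.int64)
joined, lidx, ridx = libjoin.left_join_indexer(left, right)
print(joined)  # [0 1 2]  (all left keys survive a left join)
print(lidx)    # [0 1 2]
print(ridx)    # expected [-1 0 1]; -1 means no right-hand match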
env-llmeval/lib/python3.10/site-packages/pandas/_libs/json.pyi ADDED
@@ -0,0 +1,23 @@
+from typing import (
+    Any,
+    Callable,
+)
+
+def ujson_dumps(
+    obj: Any,
+    ensure_ascii: bool = ...,
+    double_precision: int = ...,
+    indent: int = ...,
+    orient: str = ...,
+    date_unit: str = ...,
+    iso_dates: bool = ...,
+    default_handler: None
+    | Callable[[Any], str | float | bool | list | dict | None] = ...,
+) -> str: ...
+def ujson_loads(
+    s: str,
+    precise_float: bool = ...,
+    numpy: bool = ...,
+    dtype: None = ...,
+    labelled: bool = ...,
+) -> Any: ...
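ujson_dumps and ujson_loads are the vendored ujson entry points behind DataFrame.to_json and read_json. A round trip:

from pandas._libs.json import ujson_dumps, ujson_loads

s = ujson_dumps({"a": [1, 2.5, None]})
print(s)               # {"a":[1,2.5,null]}
print(ujson_loads(s))  # {'a': [1, 2.5, None]}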
env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (938 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/lib.pyi ADDED
@@ -0,0 +1,213 @@
+# TODO(npdtypes): Many types specified here can be made more specific/accurate;
+# the more specific versions are specified in comments
+from decimal import Decimal
+from typing import (
+    Any,
+    Callable,
+    Final,
+    Generator,
+    Hashable,
+    Literal,
+    TypeAlias,
+    overload,
+)
+
+import numpy as np
+
+from pandas._libs.interval import Interval
+from pandas._libs.tslibs import Period
+from pandas._typing import (
+    ArrayLike,
+    DtypeObj,
+    TypeGuard,
+    npt,
+)
+
+# placeholder until we can specify np.ndarray[object, ndim=2]
+ndarray_obj_2d = np.ndarray
+
+from enum import Enum
+
+class _NoDefault(Enum):
+    no_default = ...
+
+no_default: Final = _NoDefault.no_default
+NoDefault: TypeAlias = Literal[_NoDefault.no_default]
+
+i8max: int
+u8max: int
+
+def is_np_dtype(dtype: object, kinds: str | None = ...) -> TypeGuard[np.dtype]: ...
+def item_from_zerodim(val: object) -> object: ...
+def infer_dtype(value: object, skipna: bool = ...) -> str: ...
+def is_iterator(obj: object) -> bool: ...
+def is_scalar(val: object) -> bool: ...
+def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ...
+def is_pyarrow_array(obj: object) -> bool: ...
+def is_period(val: object) -> TypeGuard[Period]: ...
+def is_interval(obj: object) -> TypeGuard[Interval]: ...
+def is_decimal(obj: object) -> TypeGuard[Decimal]: ...
+def is_complex(obj: object) -> TypeGuard[complex]: ...
+def is_bool(obj: object) -> TypeGuard[bool | np.bool_]: ...
+def is_integer(obj: object) -> TypeGuard[int | np.integer]: ...
+def is_int_or_none(obj) -> bool: ...
+def is_float(obj: object) -> TypeGuard[float]: ...
+def is_interval_array(values: np.ndarray) -> bool: ...
+def is_datetime64_array(values: np.ndarray, skipna: bool = True) -> bool: ...
+def is_timedelta_or_timedelta64_array(
+    values: np.ndarray, skipna: bool = True
+) -> bool: ...
+def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ...
+def is_time_array(values: np.ndarray, skipna: bool = ...): ...
+def is_date_array(values: np.ndarray, skipna: bool = ...): ...
+def is_datetime_array(values: np.ndarray, skipna: bool = ...): ...
+def is_string_array(values: np.ndarray, skipna: bool = ...): ...
+def is_float_array(values: np.ndarray): ...
+def is_integer_array(values: np.ndarray, skipna: bool = ...): ...
+def is_bool_array(values: np.ndarray, skipna: bool = ...): ...
+def fast_multiget(
+    mapping: dict,
+    keys: np.ndarray,  # object[:]
+    default=...,
+) -> np.ndarray: ...
+def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ...
+def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ...
+def map_infer(
+    arr: np.ndarray,
+    f: Callable[[Any], Any],
+    convert: bool = ...,
+    ignore_na: bool = ...,
+) -> np.ndarray: ...
+@overload
+def maybe_convert_objects(
+    objects: npt.NDArray[np.object_],
+    *,
+    try_float: bool = ...,
+    safe: bool = ...,
+    convert_numeric: bool = ...,
+    convert_non_numeric: Literal[False] = ...,
+    convert_to_nullable_dtype: Literal[False] = ...,
+    dtype_if_all_nat: DtypeObj | None = ...,
+) -> npt.NDArray[np.object_ | np.number]: ...
+@overload
+def maybe_convert_objects(
+    objects: npt.NDArray[np.object_],
+    *,
+    try_float: bool = ...,
+    safe: bool = ...,
+    convert_numeric: bool = ...,
+    convert_non_numeric: bool = ...,
+    convert_to_nullable_dtype: Literal[True] = ...,
+    dtype_if_all_nat: DtypeObj | None = ...,
+) -> ArrayLike: ...
+@overload
+def maybe_convert_objects(
+    objects: npt.NDArray[np.object_],
+    *,
+    try_float: bool = ...,
+    safe: bool = ...,
+    convert_numeric: bool = ...,
+    convert_non_numeric: bool = ...,
+    convert_to_nullable_dtype: bool = ...,
+    dtype_if_all_nat: DtypeObj | None = ...,
+) -> ArrayLike: ...
+@overload
+def maybe_convert_numeric(
+    values: npt.NDArray[np.object_],
+    na_values: set,
+    convert_empty: bool = ...,
+    coerce_numeric: bool = ...,
+    convert_to_masked_nullable: Literal[False] = ...,
+) -> tuple[np.ndarray, None]: ...
+@overload
+def maybe_convert_numeric(
+    values: npt.NDArray[np.object_],
+    na_values: set,
+    convert_empty: bool = ...,
+    coerce_numeric: bool = ...,
+    *,
+    convert_to_masked_nullable: Literal[True],
+) -> tuple[np.ndarray, np.ndarray]: ...
+
+# TODO: restrict `arr`?
+def ensure_string_array(
+    arr,
+    na_value: object = ...,
+    convert_na_value: bool = ...,
+    copy: bool = ...,
+    skipna: bool = ...,
+) -> npt.NDArray[np.object_]: ...
+def convert_nans_to_NA(
+    arr: npt.NDArray[np.object_],
+) -> npt.NDArray[np.object_]: ...
+def fast_zip(ndarrays: list) -> npt.NDArray[np.object_]: ...
+
+# TODO: can we be more specific about rows?
+def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ...
+def tuples_to_object_array(
+    tuples: npt.NDArray[np.object_],
+) -> ndarray_obj_2d: ...
+
+# TODO: can we be more specific about rows?
+def to_object_array(rows: object, min_width: int = ...) -> ndarray_obj_2d: ...
+def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ...
+def maybe_booleans_to_slice(
+    mask: npt.NDArray[np.uint8],
+) -> slice | npt.NDArray[np.uint8]: ...
+def maybe_indices_to_slice(
+    indices: npt.NDArray[np.intp],
+    max_len: int,
+) -> slice | npt.NDArray[np.intp]: ...
+def is_all_arraylike(obj: list) -> bool: ...
+
+# -----------------------------------------------------------------
+# Functions which in reality take memoryviews
+
+def memory_usage_of_objects(arr: np.ndarray) -> int: ...  # object[:] # np.int64
+def map_infer_mask(
+    arr: np.ndarray,
+    f: Callable[[Any], Any],
+    mask: np.ndarray,  # const uint8_t[:]
+    convert: bool = ...,
+    na_value: Any = ...,
+    dtype: np.dtype = ...,
+) -> np.ndarray: ...
+def indices_fast(
+    index: npt.NDArray[np.intp],
+    labels: np.ndarray,  # const int64_t[:]
+    keys: list,
+    sorted_labels: list[npt.NDArray[np.int64]],
+) -> dict[Hashable, npt.NDArray[np.intp]]: ...
+def generate_slices(
+    labels: np.ndarray, ngroups: int  # const intp_t[:]
+) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
+def count_level_2d(
+    mask: np.ndarray,  # ndarray[uint8_t, ndim=2, cast=True],
+    labels: np.ndarray,  # const intp_t[:]
+    max_bin: int,
+) -> np.ndarray: ...  # np.ndarray[np.int64, ndim=2]
+def get_level_sorter(
+    codes: np.ndarray,  # const int64_t[:]
+    starts: np.ndarray,  # const intp_t[:]
+) -> np.ndarray: ...  # np.ndarray[np.intp, ndim=1]
+def generate_bins_dt64(
+    values: npt.NDArray[np.int64],
+    binner: np.ndarray,  # const int64_t[:]
+    closed: object = ...,
+    hasnans: bool = ...,
+) -> np.ndarray: ...  # np.ndarray[np.int64, ndim=1]
+def array_equivalent_object(
+    left: npt.NDArray[np.object_],
+    right: npt.NDArray[np.object_],
+) -> bool: ...
+def has_infs(arr: np.ndarray) -> bool: ...  # const floating[:]
+def has_only_ints_or_nan(arr: np.ndarray) -> bool: ...  # const floating[:]
+def get_reverse_indexer(
+    indexer: np.ndarray,  # const intp_t[:]
+    length: int,
+) -> npt.NDArray[np.intp]: ...
+def is_bool_list(obj: list) -> bool: ...
+def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
+def is_range_indexer(
+    left: np.ndarray, n: int  # np.ndarray[np.int64, ndim=1]
+) -> bool: ...
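pandas._libs.lib supplies the type-introspection helpers pandas calls on nearly every hot path. A few of the functions typed above, assuming a pandas 2.x install:

import numpy as np
from pandas._libs import lib

print(lib.infer_dtype(["a", "b"]))             # 'string'
print(lib.is_scalar(3.14), lib.is_scalar([]))  # True False
mixed = np.array([1, 2, "3"], dtype=object)
print(lib.maybe_convert_objects(mixed).dtype)  # stays object: types are mixed
print(lib.no_default)                          # the "no argument passed" sentinel used across pandas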
env-llmeval/lib/python3.10/site-packages/pandas/_libs/missing.pyi ADDED
@@ -0,0 +1,16 @@
+import numpy as np
+from numpy import typing as npt
+
+class NAType:
+    def __new__(cls, *args, **kwargs): ...
+
+NA: NAType
+
+def is_matching_na(
+    left: object, right: object, nan_matches_none: bool = ...
+) -> bool: ...
+def isposinf_scalar(val: object) -> bool: ...
+def isneginf_scalar(val: object) -> bool: ...
+def checknull(val: object, inf_as_na: bool = ...) -> bool: ...
+def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ...
+def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
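checknull and is_matching_na implement pandas' notion of "missing", which treats None, NaN, NaT and pd.NA alike for null checks but distinguishes them when matching:

import numpy as np
from pandas._libs.missing import NA, checknull, is_matching_na

print(checknull(None), checknull(np.nan), checknull(NA))  # True True True
print(is_matching_na(np.nan, np.nan))  # True: same NA kind
print(is_matching_na(np.nan, None))    # False: different NA kinds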
env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (270 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/ops_dispatch.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (61.7 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/pandas_parser.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (43.4 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (91.9 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/properties.pyi ADDED
@@ -0,0 +1,27 @@
+from typing import (
+    Sequence,
+    overload,
+)
+
+from pandas._typing import (
+    AnyArrayLike,
+    DataFrame,
+    Index,
+    Series,
+)
+
+# note: this is a lie to make type checkers happy (they special
+# case property). cache_readonly uses attribute names similar to
+# property (fget) but it does not provide fset and fdel.
+cache_readonly = property
+
+class AxisProperty:
+    axis: int
+    def __init__(self, axis: int = ..., doc: str = ...) -> None: ...
+    @overload
+    def __get__(self, obj: DataFrame | Series, type) -> Index: ...
+    @overload
+    def __get__(self, obj: None, type) -> AxisProperty: ...
+    def __set__(
+        self, obj: DataFrame | Series, value: AnyArrayLike | Sequence
+    ) -> None: ...
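At runtime cache_readonly is a Cython descriptor that computes a value once and caches it per instance (the `cache_readonly = property` line exists only for type checkers, as the comment says). A sketch:

from pandas._libs.properties import cache_readonly

class Demo:
    @cache_readonly
    def answer(self) -> int:
        print("computing...")
        return 42

d = Demo()
print(d.answer)  # prints "computing..." then 42
print(d.answer)  # 42 only; the cached value is reused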
env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (310 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/reshape.pyi ADDED
@@ -0,0 +1,16 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def unstack(
+    values: np.ndarray,  # reshape_t[:, :]
+    mask: np.ndarray,  # const uint8_t[:]
+    stride: int,
+    length: int,
+    width: int,
+    new_values: np.ndarray,  # reshape_t[:, :]
+    new_mask: np.ndarray,  # uint8_t[:, :]
+) -> None: ...
+def explode(
+    values: npt.NDArray[np.object_],
+) -> tuple[npt.NDArray[np.object_], npt.NDArray[np.int64]]: ...
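explode flattens an object array of list-likes and reports how many output rows each input row produced (empty list-likes yield a single NaN), which is what Series.explode builds on. A hedged sketch:

import numpy as np
from pandas._libs.reshape import explode

values = np.array([[1, 2], [], [3]], dtype=object)  # ragged => 1-d object array
flat, counts = explode(values)
print(flat)    # expected [1 2 nan 3]
print(counts)  # expected [2 1 1]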
env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (267 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/sas.pyi ADDED
@@ -0,0 +1,7 @@
+from pandas.io.sas.sas7bdat import SAS7BDATReader
+
+class Parser:
+    def __init__(self, parser: SAS7BDATReader) -> None: ...
+    def read(self, nrows: int) -> None: ...
+
+def get_subheader_index(signature: bytes) -> int: ...
env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (989 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/sparse.pyi ADDED
@@ -0,0 +1,51 @@
+from typing import Sequence
+
+import numpy as np
+
+from pandas._typing import (
+    Self,
+    npt,
+)
+
+class SparseIndex:
+    length: int
+    npoints: int
+    def __init__(self) -> None: ...
+    @property
+    def ngaps(self) -> int: ...
+    @property
+    def nbytes(self) -> int: ...
+    @property
+    def indices(self) -> npt.NDArray[np.int32]: ...
+    def equals(self, other) -> bool: ...
+    def lookup(self, index: int) -> np.int32: ...
+    def lookup_array(self, indexer: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]: ...
+    def to_int_index(self) -> IntIndex: ...
+    def to_block_index(self) -> BlockIndex: ...
+    def intersect(self, y_: SparseIndex) -> Self: ...
+    def make_union(self, y_: SparseIndex) -> Self: ...
+
+class IntIndex(SparseIndex):
+    indices: npt.NDArray[np.int32]
+    def __init__(
+        self, length: int, indices: Sequence[int], check_integrity: bool = ...
+    ) -> None: ...
+
+class BlockIndex(SparseIndex):
+    nblocks: int
+    blocs: np.ndarray
+    blengths: np.ndarray
+    def __init__(
+        self, length: int, blocs: np.ndarray, blengths: np.ndarray
+    ) -> None: ...
+
+    # Override to have correct parameters
+    def intersect(self, other: SparseIndex) -> Self: ...
+    def make_union(self, y: SparseIndex) -> Self: ...
+
+def make_mask_object_ndarray(
+    arr: npt.NDArray[np.object_], fill_value
+) -> npt.NDArray[np.bool_]: ...
+def get_blocks(
+    indices: npt.NDArray[np.int32],
+) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: ...
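IntIndex and BlockIndex are two interchangeable encodings of sparse locations: explicit positions versus run-length blocks. Converting between them, assuming a pandas 2.x install:

import numpy as np
from pandas._libs.sparse import IntIndex

idx = IntIndex(10, np.array([2, 3, 4, 8], dtype=np.int32))
print(idx.npoints, idx.ngaps)   # 4 6
blk = idx.to_block_index()
print(blk.blocs, blk.blengths)  # expected [2 8] [3 1]: a run of 3, then a run of 1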
env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (132 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/testing.pyi ADDED
@@ -0,0 +1,12 @@
+def assert_dict_equal(a, b, compare_keys: bool = ...): ...
+def assert_almost_equal(
+    a,
+    b,
+    rtol: float = ...,
+    atol: float = ...,
+    check_dtype: bool = ...,
+    obj=...,
+    lobj=...,
+    robj=...,
+    index_values=...,
+): ...
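assert_almost_equal is the Cython comparator underlying much of pandas._testing; it raises AssertionError with a detailed message on mismatch:

from pandas._libs.testing import assert_almost_equal

assert_almost_equal(1.0, 1.0 + 1e-9)  # passes: within default tolerances
try:
    assert_almost_equal(1.0, 1.1)
except AssertionError as err:
    print("mismatch:", err)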
env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (217 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_libs/writers.pyi ADDED
@@ -0,0 +1,20 @@
+import numpy as np
+
+from pandas._typing import ArrayLike
+
+def write_csv_rows(
+    data: list[ArrayLike],
+    data_index: np.ndarray,
+    nlevels: int,
+    cols: np.ndarray,
+    writer: object,  # _csv.writer
+) -> None: ...
+def convert_json_to_lines(arr: str) -> str: ...
+def max_len_string_array(
+    arr: np.ndarray,  # pandas_string[:]
+) -> int: ...
+def word_len(val: object) -> int: ...
+def string_array_replace_from_nan_rep(
+    arr: np.ndarray,  # np.ndarray[object, ndim=1]
+    nan_rep: object,
+) -> None: ...
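These helpers back the CSV writer's column-width bookkeeping. A tiny check:

import numpy as np
from pandas._libs.writers import max_len_string_array, word_len

arr = np.array(["a", "abc", "ab"], dtype=object)
print(max_len_string_array(arr))  # 3
print(word_len("hello"))          # 5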
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__init__.py ADDED
@@ -0,0 +1,638 @@
+ from __future__ import annotations
+
+ from decimal import Decimal
+ import operator
+ import os
+ from sys import byteorder
+ from typing import (
+     TYPE_CHECKING,
+     Callable,
+     ContextManager,
+     cast,
+ )
+ import warnings
+
+ import numpy as np
+
+ from pandas._config.localization import (
+     can_set_locale,
+     get_locales,
+     set_locale,
+ )
+
+ from pandas.compat import pa_version_under10p1
+
+ from pandas.core.dtypes.common import is_string_dtype
+
+ import pandas as pd
+ from pandas import (
+     ArrowDtype,
+     DataFrame,
+     Index,
+     MultiIndex,
+     RangeIndex,
+     Series,
+ )
+ from pandas._testing._io import (
+     round_trip_localpath,
+     round_trip_pathlib,
+     round_trip_pickle,
+     write_to_compressed,
+ )
+ from pandas._testing._warnings import (
+     assert_produces_warning,
+     maybe_produces_warning,
+ )
+ from pandas._testing.asserters import (
+     assert_almost_equal,
+     assert_attr_equal,
+     assert_categorical_equal,
+     assert_class_equal,
+     assert_contains_all,
+     assert_copy,
+     assert_datetime_array_equal,
+     assert_dict_equal,
+     assert_equal,
+     assert_extension_array_equal,
+     assert_frame_equal,
+     assert_index_equal,
+     assert_indexing_slices_equivalent,
+     assert_interval_array_equal,
+     assert_is_sorted,
+     assert_is_valid_plot_return_object,
+     assert_metadata_equivalent,
+     assert_numpy_array_equal,
+     assert_period_array_equal,
+     assert_series_equal,
+     assert_sp_array_equal,
+     assert_timedelta_array_equal,
+     raise_assert_detail,
+ )
+ from pandas._testing.compat import (
+     get_dtype,
+     get_obj,
+ )
+ from pandas._testing.contexts import (
+     assert_cow_warning,
+     decompress_file,
+     ensure_clean,
+     raises_chained_assignment_error,
+     set_timezone,
+     use_numexpr,
+     with_csv_dialect,
+ )
+ from pandas.core.arrays import (
+     BaseMaskedArray,
+     ExtensionArray,
+     NumpyExtensionArray,
+ )
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+ from pandas.core.construction import extract_array
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         Dtype,
+         NpDtype,
+     )
+
+     from pandas.core.arrays import ArrowExtensionArray
+
+ UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"]
+ UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
+ SIGNED_INT_NUMPY_DTYPES: list[NpDtype] = [int, "int8", "int16", "int32", "int64"]
+ SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
+ ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
+ ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
+ ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES]
+
+ FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"]
+ FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
+ ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES]
+
+ COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
+ STRING_DTYPES: list[Dtype] = [str, "str", "U"]
+
+ DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
+ TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
+
+ BOOL_DTYPES: list[Dtype] = [bool, "bool"]
+ BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
+ OBJECT_DTYPES: list[Dtype] = [object, "object"]
+
+ ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
+ ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES
+ ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES]
+ ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES]
+
+ ALL_NUMPY_DTYPES = (
+     ALL_REAL_NUMPY_DTYPES
+     + COMPLEX_DTYPES
+     + STRING_DTYPES
+     + DATETIME64_DTYPES
+     + TIMEDELTA64_DTYPES
+     + BOOL_DTYPES
+     + OBJECT_DTYPES
+     + BYTES_DTYPES
+ )
+
+ NARROW_NP_DTYPES = [
+     np.float16,
+     np.float32,
+     np.int8,
+     np.int16,
+     np.int32,
+     np.uint8,
+     np.uint16,
+     np.uint32,
+ ]
+
+ PYTHON_DATA_TYPES = [
+     str,
+     int,
+     float,
+     complex,
+     list,
+     tuple,
+     range,
+     dict,
+     set,
+     frozenset,
+     bool,
+     bytes,
+     bytearray,
+     memoryview,
+ ]
+
+ ENDIAN = {"little": "<", "big": ">"}[byteorder]
+
+ NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
+ NP_NAT_OBJECTS = [
+     cls("NaT", unit)
+     for cls in [np.datetime64, np.timedelta64]
+     for unit in [
+         "Y",
+         "M",
+         "W",
+         "D",
+         "h",
+         "m",
+         "s",
+         "ms",
+         "us",
+         "ns",
+         "ps",
+         "fs",
+         "as",
+     ]
+ ]
+
+ if not pa_version_under10p1:
+     import pyarrow as pa
+
+     UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
+     SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
+     ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES
+     ALL_INT_PYARROW_DTYPES_STR_REPR = [
+         str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES
+     ]
+
+     # pa.float16 doesn't seem supported
+     # https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86
+     FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()]
+     FLOAT_PYARROW_DTYPES_STR_REPR = [
+         str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES
+     ]
+     DECIMAL_PYARROW_DTYPES = [pa.decimal128(7, 3)]
+     STRING_PYARROW_DTYPES = [pa.string()]
+     BINARY_PYARROW_DTYPES = [pa.binary()]
+
+     TIME_PYARROW_DTYPES = [
+         pa.time32("s"),
+         pa.time32("ms"),
+         pa.time64("us"),
+         pa.time64("ns"),
+     ]
+     DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()]
+     DATETIME_PYARROW_DTYPES = [
+         pa.timestamp(unit=unit, tz=tz)
+         for unit in ["s", "ms", "us", "ns"]
+         for tz in [None, "UTC", "US/Pacific", "US/Eastern"]
+     ]
+     TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]]
+
+     BOOL_PYARROW_DTYPES = [pa.bool_()]
+
+     # TODO: Add container like pyarrow types:
+     # https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions
+     ALL_PYARROW_DTYPES = (
+         ALL_INT_PYARROW_DTYPES
+         + FLOAT_PYARROW_DTYPES
+         + DECIMAL_PYARROW_DTYPES
+         + STRING_PYARROW_DTYPES
+         + BINARY_PYARROW_DTYPES
+         + TIME_PYARROW_DTYPES
+         + DATE_PYARROW_DTYPES
+         + DATETIME_PYARROW_DTYPES
+         + TIMEDELTA_PYARROW_DTYPES
+         + BOOL_PYARROW_DTYPES
+     )
+     ALL_REAL_PYARROW_DTYPES_STR_REPR = (
+         ALL_INT_PYARROW_DTYPES_STR_REPR + FLOAT_PYARROW_DTYPES_STR_REPR
+     )
+ else:
+     FLOAT_PYARROW_DTYPES_STR_REPR = []
+     ALL_INT_PYARROW_DTYPES_STR_REPR = []
+     ALL_PYARROW_DTYPES = []
+     ALL_REAL_PYARROW_DTYPES_STR_REPR = []
+
+ ALL_REAL_NULLABLE_DTYPES = (
+     FLOAT_NUMPY_DTYPES + ALL_REAL_EXTENSION_DTYPES + ALL_REAL_PYARROW_DTYPES_STR_REPR
+ )
+
+ arithmetic_dunder_methods = [
+     "__add__",
+     "__radd__",
+     "__sub__",
+     "__rsub__",
+     "__mul__",
+     "__rmul__",
+     "__floordiv__",
+     "__rfloordiv__",
+     "__truediv__",
+     "__rtruediv__",
+     "__pow__",
+     "__rpow__",
+     "__mod__",
+     "__rmod__",
+ ]
+
+ comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"]
+
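The dtype lists above exist mainly to drive parametrized tests. A hedged sketch of typical use (the test name and body are illustrative, not from the pandas test suite):

import pytest
import pandas as pd
import pandas._testing as tm

@pytest.mark.parametrize("dtype", tm.ALL_INT_NUMPY_DTYPES)
def test_integer_series_keeps_dtype(dtype):
    ser = pd.Series([1, 2, 3], dtype=dtype)
    assert ser.dtype == dtype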
+
+ # -----------------------------------------------------------------------------
+ # Comparators
+
+
+ def box_expected(expected, box_cls, transpose: bool = True):
+     """
+     Helper function to wrap the expected output of a test in a given box_cls.
+
+     Parameters
+     ----------
+     expected : np.ndarray, Index, Series
+     box_cls : {Index, Series, DataFrame}
+
+     Returns
+     -------
+     subclass of box_cls
+     """
+     if box_cls is pd.array:
+         if isinstance(expected, RangeIndex):
+             # pd.array would return an IntegerArray
+             expected = NumpyExtensionArray(np.asarray(expected._values))
+         else:
+             expected = pd.array(expected, copy=False)
+     elif box_cls is Index:
+         with warnings.catch_warnings():
+             warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+             expected = Index(expected)
+     elif box_cls is Series:
+         with warnings.catch_warnings():
+             warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+             expected = Series(expected)
+     elif box_cls is DataFrame:
+         with warnings.catch_warnings():
+             warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+             expected = Series(expected).to_frame()
+         if transpose:
+             # for vector operations, we need a DataFrame to be a single-row,
+             # not a single-column, in order to operate against non-DataFrame
+             # vectors of the same length. But convert to two rows to avoid
+             # single-row special cases in datetime arithmetic
+             expected = expected.T
+             expected = pd.concat([expected] * 2, ignore_index=True)
+     elif box_cls is np.ndarray or box_cls is np.array:
+         expected = np.array(expected)
+     elif box_cls is to_array:
+         expected = to_array(expected)
+     else:
+         raise NotImplementedError(box_cls)
+     return expected
+
+
+ def to_array(obj):
+     """
+     Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
+     """
+     # temporary implementation until we get pd.array in place
+     dtype = getattr(obj, "dtype", None)
+
+     if dtype is None:
+         return np.asarray(obj)
+
+     return extract_array(obj, extract_numpy=True)
+
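box_expected lets one expected value be reused across test variants that differ only in container type. An illustrative sketch (editor's, not from the diff):

import numpy as np
import pandas as pd
from pandas._testing import box_expected

data = np.array([1, 2, 3])
ser = box_expected(data, pd.Series)   # boxed as Series([1, 2, 3])
idx = box_expected(data, pd.Index)    # boxed as Index([1, 2, 3])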
+
+ class SubclassedSeries(Series):
+     _metadata = ["testattr", "name"]
+
+     @property
+     def _constructor(self):
+         # For testing, those properties return a generic callable, and not
+         # the actual class. In this case that is equivalent, but it is to
+         # ensure we don't rely on the property returning a class
+         # See https://github.com/pandas-dev/pandas/pull/46018 and
+         # https://github.com/pandas-dev/pandas/issues/32638 and linked issues
+         return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
+
+     @property
+     def _constructor_expanddim(self):
+         return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
+
+
+ class SubclassedDataFrame(DataFrame):
+     _metadata = ["testattr"]
+
+     @property
+     def _constructor(self):
+         return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
+
+     @property
+     def _constructor_sliced(self):
+         return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
+
+
+ def convert_rows_list_to_csv_str(rows_list: list[str]) -> str:
+     """
+     Convert list of CSV rows to single CSV-formatted string for current OS.
+
+     This function is used for creating the expected value of the to_csv() method.
+
+     Parameters
+     ----------
+     rows_list : List[str]
+         Each element represents one row of the CSV.
+
+     Returns
+     -------
+     str
+         Expected output of to_csv() on the current OS.
+     """
+     sep = os.linesep
+     return sep.join(rows_list) + sep
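convert_rows_list_to_csv_str mirrors how to_csv terminates lines on the current OS, so expected strings stay portable. Sketch:

from pandas._testing import convert_rows_list_to_csv_str

expected = convert_rows_list_to_csv_str(["a,b", "1,2"])
# "a,b\r\n1,2\r\n" on Windows, "a,b\n1,2\n" elsewhere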
+
+
+ def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
+     """
+     Helper function to mark pytest.raises that have an external error message.
+
+     Parameters
+     ----------
+     expected_exception : Exception
+         Expected error to raise.
+
+     Returns
+     -------
+     Callable
+         Regular `pytest.raises` function with `match` equal to `None`.
+     """
+     import pytest
+
+     return pytest.raises(expected_exception, match=None)
+
+
+ cython_table = pd.core.common._cython_table.items()
+
+
+ def get_cython_table_params(ndframe, func_names_and_expected):
+     """
+     Combine frame, functions from com._cython_table
+     keys and expected result.
+
+     Parameters
+     ----------
+     ndframe : DataFrame or Series
+     func_names_and_expected : Sequence of two items
+         The first item is the name of an NDFrame method ('sum', 'prod', etc.).
+         The second item is the expected return value.
+
+     Returns
+     -------
+     list
+         List of three items (DataFrame, function, expected result)
+     """
+     results = []
+     for func_name, expected in func_names_and_expected:
+         results.append((ndframe, func_name, expected))
+         results += [
+             (ndframe, func, expected)
+             for func, name in cython_table
+             if name == func_name
+         ]
+     return results
+
+
+ def get_op_from_name(op_name: str) -> Callable:
+     """
+     The operator function for a given op name.
+
+     Parameters
+     ----------
+     op_name : str
+         The op name, in form of "add" or "__add__".
+
+     Returns
+     -------
+     function
+         A function performing the operation.
+     """
+     short_opname = op_name.strip("_")
+     try:
+         op = getattr(operator, short_opname)
+     except AttributeError:
+         # Assume it is the reverse operator
+         rop = getattr(operator, short_opname[1:])
+         op = lambda x, y: rop(y, x)
+
+     return op
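Note how reversed dunder names fall back to a flipped binary operator. Sketch:

from pandas._testing import get_op_from_name

sub = get_op_from_name("__sub__")
rsub = get_op_from_name("__rsub__")
assert sub(5, 3) == 2     # operator.sub(5, 3)
assert rsub(5, 3) == -2   # operator.sub(3, 5), arguments flipped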
+
+
+ # -----------------------------------------------------------------------------
+ # Indexing test helpers
+
+
+ def getitem(x):
+     return x
+
+
+ def setitem(x):
+     return x
+
+
+ def loc(x):
+     return x.loc
+
+
+ def iloc(x):
+     return x.iloc
+
+
+ def at(x):
+     return x.at
+
+
+ def iat(x):
+     return x.iat
+
+
+ # -----------------------------------------------------------------------------
+
+ _UNITS = ["s", "ms", "us", "ns"]
+
+
+ def get_finest_unit(left: str, right: str):
+     """
+     Find the higher of two datetime64 units.
+     """
+     if _UNITS.index(left) >= _UNITS.index(right):
+         return left
+     return right
+
+
+ def shares_memory(left, right) -> bool:
+     """
+     Pandas-compat for np.shares_memory.
+     """
+     if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
+         return np.shares_memory(left, right)
+     elif isinstance(left, np.ndarray):
+         # Call with reversed args to get to unpacking logic below.
+         return shares_memory(right, left)
+
+     if isinstance(left, RangeIndex):
+         return False
+     if isinstance(left, MultiIndex):
+         return shares_memory(left._codes, right)
+     if isinstance(left, (Index, Series)):
+         return shares_memory(left._values, right)
+
+     if isinstance(left, NDArrayBackedExtensionArray):
+         return shares_memory(left._ndarray, right)
+     if isinstance(left, pd.core.arrays.SparseArray):
+         return shares_memory(left.sp_values, right)
+     if isinstance(left, pd.core.arrays.IntervalArray):
+         return shares_memory(left._left, right) or shares_memory(left._right, right)
+
+     if (
+         isinstance(left, ExtensionArray)
+         and is_string_dtype(left.dtype)
+         and left.dtype.storage in ("pyarrow", "pyarrow_numpy")  # type: ignore[attr-defined]
+     ):
+         # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669
+         left = cast("ArrowExtensionArray", left)
+         if (
+             isinstance(right, ExtensionArray)
+             and is_string_dtype(right.dtype)
+             and right.dtype.storage in ("pyarrow", "pyarrow_numpy")  # type: ignore[attr-defined]
+         ):
+             right = cast("ArrowExtensionArray", right)
+             left_pa_data = left._pa_array
+             right_pa_data = right._pa_array
+             left_buf1 = left_pa_data.chunk(0).buffers()[1]
+             right_buf1 = right_pa_data.chunk(0).buffers()[1]
+             return left_buf1 == right_buf1
+
+     if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray):
+         # By convention, we'll say these share memory if they share *either*
+         # the _data or the _mask
+         return np.shares_memory(left._data, right._data) or np.shares_memory(
+             left._mask, right._mask
+         )
+
+     if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1:
+         arr = left._mgr.arrays[0]
+         return shares_memory(arr, right)
+
+     raise NotImplementedError(type(left), type(right))
+
+
+ __all__ = [
+     "ALL_INT_EA_DTYPES",
+     "ALL_INT_NUMPY_DTYPES",
+     "ALL_NUMPY_DTYPES",
+     "ALL_REAL_NUMPY_DTYPES",
+     "assert_almost_equal",
+     "assert_attr_equal",
+     "assert_categorical_equal",
+     "assert_class_equal",
+     "assert_contains_all",
+     "assert_copy",
+     "assert_datetime_array_equal",
+     "assert_dict_equal",
+     "assert_equal",
+     "assert_extension_array_equal",
+     "assert_frame_equal",
+     "assert_index_equal",
+     "assert_indexing_slices_equivalent",
+     "assert_interval_array_equal",
+     "assert_is_sorted",
+     "assert_is_valid_plot_return_object",
+     "assert_metadata_equivalent",
+     "assert_numpy_array_equal",
+     "assert_period_array_equal",
+     "assert_produces_warning",
+     "assert_series_equal",
+     "assert_sp_array_equal",
+     "assert_timedelta_array_equal",
+     "assert_cow_warning",
+     "at",
+     "BOOL_DTYPES",
+     "box_expected",
+     "BYTES_DTYPES",
+     "can_set_locale",
+     "COMPLEX_DTYPES",
+     "convert_rows_list_to_csv_str",
+     "DATETIME64_DTYPES",
+     "decompress_file",
+     "ENDIAN",
+     "ensure_clean",
+     "external_error_raised",
+     "FLOAT_EA_DTYPES",
+     "FLOAT_NUMPY_DTYPES",
+     "get_cython_table_params",
+     "get_dtype",
+     "getitem",
+     "get_locales",
+     "get_finest_unit",
+     "get_obj",
+     "get_op_from_name",
+     "iat",
+     "iloc",
+     "loc",
+     "maybe_produces_warning",
+     "NARROW_NP_DTYPES",
+     "NP_NAT_OBJECTS",
+     "NULL_OBJECTS",
+     "OBJECT_DTYPES",
+     "raise_assert_detail",
+     "raises_chained_assignment_error",
+     "round_trip_localpath",
+     "round_trip_pathlib",
+     "round_trip_pickle",
+     "setitem",
+     "set_locale",
+     "set_timezone",
+     "shares_memory",
+     "SIGNED_INT_EA_DTYPES",
+     "SIGNED_INT_NUMPY_DTYPES",
+     "STRING_DTYPES",
+     "SubclassedDataFrame",
+     "SubclassedSeries",
+     "TIMEDELTA64_DTYPES",
+     "to_array",
+     "UNSIGNED_INT_EA_DTYPES",
+     "UNSIGNED_INT_NUMPY_DTYPES",
+     "use_numexpr",
+     "with_csv_dialect",
+     "write_to_compressed",
+ ]
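shares_memory normalizes the many pandas containers down to buffer comparisons. A quick sketch (copy-on-write semantics affect when views are taken, so treat this as indicative rather than guaranteed):

import numpy as np
import pandas as pd
import pandas._testing as tm

arr = np.arange(5)
ser = pd.Series(arr, copy=False)
assert tm.shares_memory(ser, arr)                    # Series wraps the same buffer
assert not tm.shares_memory(ser.copy(deep=True), arr)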
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (14.2 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc ADDED
Binary file (1.77 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc ADDED
Binary file (4.39 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc ADDED
Binary file (6.51 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc ADDED
Binary file (32.9 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc ADDED
Binary file (950 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc ADDED
Binary file (6.24 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py ADDED
@@ -0,0 +1,93 @@
+ """
+ Hypothesis data generator helpers.
+ """
+ from datetime import datetime
+
+ from hypothesis import strategies as st
+ from hypothesis.extra.dateutil import timezones as dateutil_timezones
+ from hypothesis.extra.pytz import timezones as pytz_timezones
+
+ from pandas.compat import is_platform_windows
+
+ import pandas as pd
+
+ from pandas.tseries.offsets import (
+     BMonthBegin,
+     BMonthEnd,
+     BQuarterBegin,
+     BQuarterEnd,
+     BYearBegin,
+     BYearEnd,
+     MonthBegin,
+     MonthEnd,
+     QuarterBegin,
+     QuarterEnd,
+     YearBegin,
+     YearEnd,
+ )
+
+ OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3)
+
+ OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3)
+
+ OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3)
+
+ OPTIONAL_DICTS = st.lists(
+     st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
+     max_size=10,
+     min_size=3,
+ )
+
+ OPTIONAL_LISTS = st.lists(
+     st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)),
+     max_size=10,
+     min_size=3,
+ )
+
+ OPTIONAL_ONE_OF_ALL = st.one_of(
+     OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT
+ )
+
+ if is_platform_windows():
+     DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1))
+ else:
+     DATETIME_NO_TZ = st.datetimes()
+
+ DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
+     min_value=pd.Timestamp(
+         1900, 1, 1
+     ).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
+     max_value=pd.Timestamp(
+         1900, 1, 1
+     ).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
+     timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
+ )
+
+ DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
+     min_value=pd.Timestamp.min.to_pydatetime(warn=False),
+     max_value=pd.Timestamp.max.to_pydatetime(warn=False),
+ )
+
+ INT_NEG_999_TO_POS_999 = st.integers(-999, 999)
+
+ # The strategy for each type is registered in conftest.py, as they don't carry
+ # enough runtime information (e.g. type hints) to infer how to build them.
+ YQM_OFFSET = st.one_of(
+     *map(
+         st.from_type,
+         [
+             MonthBegin,
+             MonthEnd,
+             BMonthBegin,
+             BMonthEnd,
+             QuarterBegin,
+             QuarterEnd,
+             BQuarterBegin,
+             BQuarterEnd,
+             YearBegin,
+             YearEnd,
+             BYearBegin,
+             BYearEnd,
+         ],
+     )
+ )
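These strategies are consumed with hypothesis.given in property-based tests. An illustrative sketch (the test body is the editor's, not from the pandas suite; object dtype sidesteps overflow for unbounded integers):

from hypothesis import given
import pandas as pd
from pandas._testing._hypothesis import OPTIONAL_INTS

@given(data=OPTIONAL_INTS)
def test_series_preserves_length(data):
    ser = pd.Series(data, dtype=object)
    assert len(ser) == len(data)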
env-llmeval/lib/python3.10/site-packages/pandas/_testing/_io.py ADDED
@@ -0,0 +1,170 @@
+ from __future__ import annotations
+
+ import gzip
+ import io
+ import pathlib
+ import tarfile
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+ )
+ import uuid
+ import zipfile
+
+ from pandas.compat import (
+     get_bz2_file,
+     get_lzma_file,
+ )
+ from pandas.compat._optional import import_optional_dependency
+
+ import pandas as pd
+ from pandas._testing.contexts import ensure_clean
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         FilePath,
+         ReadPickleBuffer,
+     )
+
+     from pandas import (
+         DataFrame,
+         Series,
+     )
+
+ # ------------------------------------------------------------------
+ # File-IO
+
+
+ def round_trip_pickle(
+     obj: Any, path: FilePath | ReadPickleBuffer | None = None
+ ) -> DataFrame | Series:
+     """
+     Pickle an object and then read it again.
+
+     Parameters
+     ----------
+     obj : any object
+         The object to pickle and then re-read.
+     path : str, path object or file-like object, default None
+         The path where the pickled object is written and then read.
+
+     Returns
+     -------
+     pandas object
+         The original object that was pickled and then re-read.
+     """
+     _path = path
+     if _path is None:
+         _path = f"__{uuid.uuid4()}__.pickle"
+     with ensure_clean(_path) as temp_path:
+         pd.to_pickle(obj, temp_path)
+         return pd.read_pickle(temp_path)
+
+
+ def round_trip_pathlib(writer, reader, path: str | None = None):
+     """
+     Write an object to file specified by a pathlib.Path and read it back.
+
+     Parameters
+     ----------
+     writer : callable bound to pandas object
+         IO writing function (e.g. DataFrame.to_csv)
+     reader : callable
+         IO reading function (e.g. pd.read_csv)
+     path : str, default None
+         The path where the object is written and then read.
+
+     Returns
+     -------
+     pandas object
+         The original object that was serialized and then re-read.
+     """
+     Path = pathlib.Path
+     if path is None:
+         path = "___pathlib___"
+     with ensure_clean(path) as path:
+         writer(Path(path))  # type: ignore[arg-type]
+         obj = reader(Path(path))  # type: ignore[arg-type]
+     return obj
+
+
+ def round_trip_localpath(writer, reader, path: str | None = None):
+     """
+     Write an object to file specified by a py.path LocalPath and read it back.
+
+     Parameters
+     ----------
+     writer : callable bound to pandas object
+         IO writing function (e.g. DataFrame.to_csv)
+     reader : callable
+         IO reading function (e.g. pd.read_csv)
+     path : str, default None
+         The path where the object is written and then read.
+
+     Returns
+     -------
+     pandas object
+         The original object that was serialized and then re-read.
+     """
+     import pytest
+
+     LocalPath = pytest.importorskip("py.path").local
+     if path is None:
+         path = "___localpath___"
+     with ensure_clean(path) as path:
+         writer(LocalPath(path))
+         obj = reader(LocalPath(path))
+     return obj
+
+
+ def write_to_compressed(compression, path, data, dest: str = "test") -> None:
+     """
+     Write data to a compressed file.
+
+     Parameters
+     ----------
+     compression : {'gzip', 'bz2', 'zip', 'tar', 'xz', 'zstd'}
+         The compression type to use.
+     path : str
+         The file path to write the data.
+     data : str
+         The data to write.
+     dest : str, default "test"
+         The destination file (for ZIP and tar only).
+
+     Raises
+     ------
+     ValueError : An invalid compression value was passed in.
+     """
+     args: tuple[Any, ...] = (data,)
+     mode = "wb"
+     method = "write"
+     compress_method: Callable
+
+     if compression == "zip":
+         compress_method = zipfile.ZipFile
+         mode = "w"
+         args = (dest, data)
+         method = "writestr"
+     elif compression == "tar":
+         compress_method = tarfile.TarFile
+         mode = "w"
+         file = tarfile.TarInfo(name=dest)
+         bytes = io.BytesIO(data)
+         file.size = len(data)
+         args = (file, bytes)
+         method = "addfile"
+     elif compression == "gzip":
+         compress_method = gzip.GzipFile
+     elif compression == "bz2":
+         compress_method = get_bz2_file()
+     elif compression == "zstd":
+         compress_method = import_optional_dependency("zstandard").open
+     elif compression == "xz":
+         compress_method = get_lzma_file()
+     else:
+         raise ValueError(f"Unrecognized compression type: {compression}")
+
+     with compress_method(path, mode=mode) as f:
+         getattr(f, method)(*args)
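round_trip_pickle is the most commonly used of these helpers: serialize, read back, and compare. Sketch:

import pandas as pd
from pandas._testing._io import round_trip_pickle

df = pd.DataFrame({"a": [1, 2, 3]})
result = round_trip_pickle(df)
pd.testing.assert_frame_equal(result, df)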
env-llmeval/lib/python3.10/site-packages/pandas/_testing/_warnings.py ADDED
@@ -0,0 +1,232 @@
+ from __future__ import annotations
+
+ from contextlib import (
+     contextmanager,
+     nullcontext,
+ )
+ import inspect
+ import re
+ import sys
+ from typing import (
+     TYPE_CHECKING,
+     Literal,
+     cast,
+ )
+ import warnings
+
+ from pandas.compat import PY311
+
+ if TYPE_CHECKING:
+     from collections.abc import (
+         Generator,
+         Sequence,
+     )
+
+
+ @contextmanager
+ def assert_produces_warning(
+     expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning,
+     filter_level: Literal[
+         "error", "ignore", "always", "default", "module", "once"
+     ] = "always",
+     check_stacklevel: bool = True,
+     raise_on_extra_warnings: bool = True,
+     match: str | None = None,
+ ) -> Generator[list[warnings.WarningMessage], None, None]:
+     """
+     Context manager for running code expected to either raise a specific warning,
+     multiple specific warnings, or not raise any warnings. Verifies that the code
+     raises the expected warning(s), and that it does not raise any other unexpected
+     warnings. It is basically a wrapper around ``warnings.catch_warnings``.
+
+     Parameters
+     ----------
+     expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning
+         The type of warning expected. ``Warning`` is the base
+         class for all warnings. To expect multiple warning types,
+         pass them as a tuple. To check that no warning is returned,
+         specify ``False`` or ``None``.
+     filter_level : str or None, default "always"
+         Specifies whether warnings are ignored, displayed, or turned
+         into errors.
+         Valid values are:
+
+         * "error" - turns matching warnings into exceptions
+         * "ignore" - discard the warning
+         * "always" - always emit a warning
+         * "default" - print the warning the first time it is generated
+           from each location
+         * "module" - print the warning the first time it is generated
+           from each module
+         * "once" - print the warning the first time it is generated
+
+     check_stacklevel : bool, default True
+         If True, displays the line that called the function containing
+         the warning to show where the function is called. Otherwise, the
+         line that implements the function is displayed.
+     raise_on_extra_warnings : bool, default True
+         Whether extra warnings not of the type `expected_warning` should
+         cause the test to fail.
+     match : str, optional
+         Match warning message.
+
+     Examples
+     --------
+     >>> import warnings
+     >>> with assert_produces_warning():
+     ...     warnings.warn(UserWarning())
+     ...
+     >>> with assert_produces_warning(False):
+     ...     warnings.warn(RuntimeWarning())
+     ...
+     Traceback (most recent call last):
+         ...
+     AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
+     >>> with assert_produces_warning(UserWarning):
+     ...     warnings.warn(RuntimeWarning())
+     Traceback (most recent call last):
+         ...
+     AssertionError: Did not see expected warning of class 'UserWarning'.
+
+     .. warning:: This is *not* thread-safe.
+     """
+     __tracebackhide__ = True
+
+     with warnings.catch_warnings(record=True) as w:
+         warnings.simplefilter(filter_level)
+         try:
+             yield w
+         finally:
+             if expected_warning:
+                 expected_warning = cast(type[Warning], expected_warning)
+                 _assert_caught_expected_warning(
+                     caught_warnings=w,
+                     expected_warning=expected_warning,
+                     match=match,
+                     check_stacklevel=check_stacklevel,
+                 )
+             if raise_on_extra_warnings:
+                 _assert_caught_no_extra_warnings(
+                     caught_warnings=w,
+                     expected_warning=expected_warning,
+                 )
+
+
+ def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs):
+     """
+     Return a context manager that possibly checks a warning based on the condition.
+     """
+     if condition:
+         return assert_produces_warning(warning, **kwargs)
+     else:
+         return nullcontext()
+
+
+ def _assert_caught_expected_warning(
+     *,
+     caught_warnings: Sequence[warnings.WarningMessage],
+     expected_warning: type[Warning],
+     match: str | None,
+     check_stacklevel: bool,
+ ) -> None:
+     """Assert that there was the expected warning among the caught warnings."""
+     saw_warning = False
+     matched_message = False
+     unmatched_messages = []
+
+     for actual_warning in caught_warnings:
+         if issubclass(actual_warning.category, expected_warning):
+             saw_warning = True
+
+             if check_stacklevel:
+                 _assert_raised_with_correct_stacklevel(actual_warning)
+
+             if match is not None:
+                 if re.search(match, str(actual_warning.message)):
+                     matched_message = True
+                 else:
+                     unmatched_messages.append(actual_warning.message)
+
+     if not saw_warning:
+         raise AssertionError(
+             f"Did not see expected warning of class "
+             f"{repr(expected_warning.__name__)}"
+         )
+
+     if match and not matched_message:
+         raise AssertionError(
+             f"Did not see warning {repr(expected_warning.__name__)} "
+             f"matching '{match}'. The emitted warning messages are "
+             f"{unmatched_messages}"
+         )
+
+
+ def _assert_caught_no_extra_warnings(
+     *,
+     caught_warnings: Sequence[warnings.WarningMessage],
+     expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
+ ) -> None:
+     """Assert that no extra warnings apart from the expected ones are caught."""
+     extra_warnings = []
+
+     for actual_warning in caught_warnings:
+         if _is_unexpected_warning(actual_warning, expected_warning):
+             # GH#38630 pytest.filterwarnings does not suppress these.
+             if actual_warning.category == ResourceWarning:
+                 # GH 44732: Don't make the CI flaky by filtering SSL-related
+                 # ResourceWarning from dependencies
+                 if "unclosed <ssl.SSLSocket" in str(actual_warning.message):
+                     continue
+                 # GH 44844: Matplotlib leaves font files open during the entire process
+                 # upon import. Don't make CI flaky if ResourceWarning raised
+                 # due to these open files.
+                 if any("matplotlib" in mod for mod in sys.modules):
+                     continue
+             if PY311 and actual_warning.category == EncodingWarning:
+                 # EncodingWarnings are checked in the CI
+                 # pyproject.toml errors on EncodingWarnings in pandas
+                 # Ignore EncodingWarnings from other libraries
+                 continue
+             extra_warnings.append(
+                 (
+                     actual_warning.category.__name__,
+                     actual_warning.message,
+                     actual_warning.filename,
+                     actual_warning.lineno,
+                 )
+             )
+
+     if extra_warnings:
+         raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
+
+
+ def _is_unexpected_warning(
+     actual_warning: warnings.WarningMessage,
+     expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
+ ) -> bool:
+     """Check if the actual warning issued is unexpected."""
+     if actual_warning and not expected_warning:
+         return True
+     expected_warning = cast(type[Warning], expected_warning)
+     return bool(not issubclass(actual_warning.category, expected_warning))
+
+
+ def _assert_raised_with_correct_stacklevel(
+     actual_warning: warnings.WarningMessage,
+ ) -> None:
+     # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
+     frame = inspect.currentframe()
+     for _ in range(4):
+         frame = frame.f_back  # type: ignore[union-attr]
+     try:
+         caller_filename = inspect.getfile(frame)  # type: ignore[arg-type]
+     finally:
+         # See note in
+         # https://docs.python.org/3/library/inspect.html#inspect.Traceback
+         del frame
+     msg = (
+         "Warning not set with correct stacklevel. "
+         f"File where warning is raised: {actual_warning.filename} != "
+         f"{caller_filename}. Warning message: {actual_warning.message}"
+     )
+     assert actual_warning.filename == caller_filename, msg
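Typical use pairs the expected category with a match pattern; the stacklevel check then verifies the warning points at the caller. Sketch (the deprecated helper is illustrative):

import warnings
from pandas._testing import assert_produces_warning

def old_api():
    warnings.warn("use new_api instead", FutureWarning, stacklevel=2)

with assert_produces_warning(FutureWarning, match="new_api"):
    old_api()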
env-llmeval/lib/python3.10/site-packages/pandas/_testing/asserters.py ADDED
@@ -0,0 +1,1435 @@
+ from __future__ import annotations
+
+ import operator
+ from typing import (
+     TYPE_CHECKING,
+     Literal,
+     NoReturn,
+     cast,
+ )
+
+ import numpy as np
+
+ from pandas._libs import lib
+ from pandas._libs.missing import is_matching_na
+ from pandas._libs.sparse import SparseIndex
+ import pandas._libs.testing as _testing
+ from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
+
+ from pandas.core.dtypes.common import (
+     is_bool,
+     is_float_dtype,
+     is_integer_dtype,
+     is_number,
+     is_numeric_dtype,
+     needs_i8_conversion,
+ )
+ from pandas.core.dtypes.dtypes import (
+     CategoricalDtype,
+     DatetimeTZDtype,
+     ExtensionDtype,
+     NumpyEADtype,
+ )
+ from pandas.core.dtypes.missing import array_equivalent
+
+ import pandas as pd
+ from pandas import (
+     Categorical,
+     DataFrame,
+     DatetimeIndex,
+     Index,
+     IntervalDtype,
+     IntervalIndex,
+     MultiIndex,
+     PeriodIndex,
+     RangeIndex,
+     Series,
+     TimedeltaIndex,
+ )
+ from pandas.core.arrays import (
+     DatetimeArray,
+     ExtensionArray,
+     IntervalArray,
+     PeriodArray,
+     TimedeltaArray,
+ )
+ from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
+ from pandas.core.arrays.string_ import StringDtype
+ from pandas.core.indexes.api import safe_sort_index
+
+ from pandas.io.formats.printing import pprint_thing
+
+ if TYPE_CHECKING:
+     from pandas._typing import DtypeObj
+
+
+ def assert_almost_equal(
+     left,
+     right,
+     check_dtype: bool | Literal["equiv"] = "equiv",
+     rtol: float = 1.0e-5,
+     atol: float = 1.0e-8,
+     **kwargs,
+ ) -> None:
+     """
+     Check that the left and right objects are approximately equal.
+
+     By approximately equal, we refer to objects that are numbers or that
+     contain numbers which may be equivalent to specific levels of precision.
+
+     Parameters
+     ----------
+     left : object
+     right : object
+     check_dtype : bool or {'equiv'}, default 'equiv'
+         Check dtype if both left and right are the same type. If 'equiv' is
+         passed in, then `RangeIndex` and `Index` with int64 dtype are also
+         considered equivalent when doing type checking.
+     rtol : float, default 1e-5
+         Relative tolerance.
+     atol : float, default 1e-8
+         Absolute tolerance.
+     """
+     if isinstance(left, Index):
+         assert_index_equal(
+             left,
+             right,
+             check_exact=False,
+             exact=check_dtype,
+             rtol=rtol,
+             atol=atol,
+             **kwargs,
+         )
+
+     elif isinstance(left, Series):
+         assert_series_equal(
+             left,
+             right,
+             check_exact=False,
+             check_dtype=check_dtype,
+             rtol=rtol,
+             atol=atol,
+             **kwargs,
+         )
+
+     elif isinstance(left, DataFrame):
+         assert_frame_equal(
+             left,
+             right,
+             check_exact=False,
+             check_dtype=check_dtype,
+             rtol=rtol,
+             atol=atol,
+             **kwargs,
+         )
+
+     else:
+         # Other sequences.
+         if check_dtype:
+             if is_number(left) and is_number(right):
+                 # Do not compare numeric classes, like np.float64 and float.
+                 pass
+             elif is_bool(left) and is_bool(right):
+                 # Do not compare bool classes, like np.bool_ and bool.
+                 pass
+             else:
+                 if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
+                     obj = "numpy array"
+                 else:
+                     obj = "Input"
+                 assert_class_equal(left, right, obj=obj)
+
+         # if we have "equiv", this becomes True
+         _testing.assert_almost_equal(
+             left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs
+         )
+
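The dispatch above means one entry point covers scalars, Index, Series, and DataFrame. Sketch:

import pandas as pd
import pandas._testing as tm

tm.assert_almost_equal(0.3, 0.3 + 1e-9)        # scalars, within the default rtol
tm.assert_almost_equal(pd.Series([1.0, 2.0]),
                       pd.Series([1.0, 2.0 + 1e-9]))   # dispatches to assert_series_equal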
+
+ def _check_isinstance(left, right, cls) -> None:
+     """
+     Helper method for our assert_* methods that ensures that
+     the two objects being compared have the right type before
+     proceeding with the comparison.
+
+     Parameters
+     ----------
+     left : The first object being compared.
+     right : The second object being compared.
+     cls : The class type to check against.
+
+     Raises
+     ------
+     AssertionError : Either `left` or `right` is not an instance of `cls`.
+     """
+     cls_name = cls.__name__
+
+     if not isinstance(left, cls):
+         raise AssertionError(
+             f"{cls_name} Expected type {cls}, found {type(left)} instead"
+         )
+     if not isinstance(right, cls):
+         raise AssertionError(
+             f"{cls_name} Expected type {cls}, found {type(right)} instead"
+         )
+
+
+ def assert_dict_equal(left, right, compare_keys: bool = True) -> None:
+     _check_isinstance(left, right, dict)
+     _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
+
+
+ def assert_index_equal(
+     left: Index,
+     right: Index,
+     exact: bool | str = "equiv",
+     check_names: bool = True,
+     check_exact: bool = True,
+     check_categorical: bool = True,
+     check_order: bool = True,
+     rtol: float = 1.0e-5,
+     atol: float = 1.0e-8,
+     obj: str = "Index",
+ ) -> None:
+     """
+     Check that left and right Index are equal.
+
+     Parameters
+     ----------
+     left : Index
+     right : Index
+     exact : bool or {'equiv'}, default 'equiv'
+         Whether to check the Index class, dtype and inferred_type
+         are identical. If 'equiv', then RangeIndex can be substituted for
+         Index with an int64 dtype as well.
+     check_names : bool, default True
+         Whether to check the names attribute.
+     check_exact : bool, default True
+         Whether to compare number exactly.
+     check_categorical : bool, default True
+         Whether to compare internal Categorical exactly.
+     check_order : bool, default True
+         Whether to compare the order of index entries as well as their values.
+         If True, both indexes must contain the same elements, in the same order.
+         If False, both indexes must contain the same elements, but in any order.
+     rtol : float, default 1e-5
+         Relative tolerance. Only used when check_exact is False.
+     atol : float, default 1e-8
+         Absolute tolerance. Only used when check_exact is False.
+     obj : str, default 'Index'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+
+     Examples
+     --------
+     >>> from pandas import testing as tm
+     >>> a = pd.Index([1, 2, 3])
+     >>> b = pd.Index([1, 2, 3])
+     >>> tm.assert_index_equal(a, b)
+     """
+     __tracebackhide__ = True
+
+     def _check_types(left, right, obj: str = "Index") -> None:
+         if not exact:
+             return
+
+         assert_class_equal(left, right, exact=exact, obj=obj)
+         assert_attr_equal("inferred_type", left, right, obj=obj)
+
+         # Skip exact dtype checking when `check_categorical` is False
+         if isinstance(left.dtype, CategoricalDtype) and isinstance(
+             right.dtype, CategoricalDtype
+         ):
+             if check_categorical:
+                 assert_attr_equal("dtype", left, right, obj=obj)
+                 assert_index_equal(left.categories, right.categories, exact=exact)
+             return
+
+         assert_attr_equal("dtype", left, right, obj=obj)
+
+     # instance validation
+     _check_isinstance(left, right, Index)
+
+     # class / dtype comparison
+     _check_types(left, right, obj=obj)
+
+     # level comparison
+     if left.nlevels != right.nlevels:
+         msg1 = f"{obj} levels are different"
+         msg2 = f"{left.nlevels}, {left}"
+         msg3 = f"{right.nlevels}, {right}"
+         raise_assert_detail(obj, msg1, msg2, msg3)
+
+     # length comparison
+     if len(left) != len(right):
+         msg1 = f"{obj} lengths are different"
+         msg2 = f"{len(left)}, {left}"
+         msg3 = f"{len(right)}, {right}"
+         raise_assert_detail(obj, msg1, msg2, msg3)
+
+     # If order doesn't matter then sort the index entries
+     if not check_order:
+         left = safe_sort_index(left)
+         right = safe_sort_index(right)
+
+     # MultiIndex special comparison for more user-friendly error messages
+     if isinstance(left, MultiIndex):
+         right = cast(MultiIndex, right)
+
+         for level in range(left.nlevels):
+             lobj = f"MultiIndex level [{level}]"
+             try:
+                 # try comparison on levels/codes to avoid densifying MultiIndex
+                 assert_index_equal(
+                     left.levels[level],
+                     right.levels[level],
+                     exact=exact,
+                     check_names=check_names,
+                     check_exact=check_exact,
+                     check_categorical=check_categorical,
+                     rtol=rtol,
+                     atol=atol,
+                     obj=lobj,
+                 )
+                 assert_numpy_array_equal(left.codes[level], right.codes[level])
+             except AssertionError:
+                 llevel = left.get_level_values(level)
+                 rlevel = right.get_level_values(level)
+
+                 assert_index_equal(
+                     llevel,
+                     rlevel,
+                     exact=exact,
+                     check_names=check_names,
+                     check_exact=check_exact,
+                     check_categorical=check_categorical,
+                     rtol=rtol,
+                     atol=atol,
+                     obj=lobj,
+                 )
+             # get_level_values may change dtype
+             _check_types(left.levels[level], right.levels[level], obj=obj)
+
+     # skip exact index checking when `check_categorical` is False
+     elif check_exact and check_categorical:
+         if not left.equals(right):
+             mismatch = left._values != right._values
+
+             if not isinstance(mismatch, np.ndarray):
+                 mismatch = cast("ExtensionArray", mismatch).fillna(True)
+
+             diff = np.sum(mismatch.astype(int)) * 100.0 / len(left)
+             msg = f"{obj} values are different ({np.round(diff, 5)} %)"
+             raise_assert_detail(obj, msg, left, right)
+     else:
+         # if we have "equiv", this becomes True
+         exact_bool = bool(exact)
+         _testing.assert_almost_equal(
+             left.values,
+             right.values,
+             rtol=rtol,
+             atol=atol,
+             check_dtype=exact_bool,
+             obj=obj,
+             lobj=left,
+             robj=right,
+         )
+
+     # metadata comparison
+     if check_names:
+         assert_attr_equal("names", left, right, obj=obj)
+     if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
+         assert_attr_equal("dtype", left, right, obj=obj)
+     if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
+         assert_interval_array_equal(left._values, right._values)
+
+     if check_categorical:
+         if isinstance(left.dtype, CategoricalDtype) or isinstance(
+             right.dtype, CategoricalDtype
+         ):
+             assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
+
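check_order=False is the noteworthy switch here: both indexes are passed through safe_sort_index before comparison. Sketch:

import pandas as pd
import pandas._testing as tm

tm.assert_index_equal(pd.Index([3, 1, 2]), pd.Index([1, 2, 3]), check_order=False)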
351
+
352
+ def assert_class_equal(
353
+ left, right, exact: bool | str = True, obj: str = "Input"
354
+ ) -> None:
355
+ """
356
+ Checks classes are equal.
357
+ """
358
+ __tracebackhide__ = True
359
+
360
+ def repr_class(x):
361
+ if isinstance(x, Index):
362
+ # return Index as it is to include values in the error message
363
+ return x
364
+
365
+ return type(x).__name__
366
+
367
+ def is_class_equiv(idx: Index) -> bool:
368
+ """Classes that are a RangeIndex (sub-)instance or exactly an `Index` .
369
+
370
+ This only checks class equivalence. There is a separate check that the
371
+ dtype is int64.
372
+ """
373
+ return type(idx) is Index or isinstance(idx, RangeIndex)
374
+
375
+ if type(left) == type(right):
376
+ return
377
+
378
+ if exact == "equiv":
379
+ if is_class_equiv(left) and is_class_equiv(right):
380
+ return
381
+
382
+ msg = f"{obj} classes are different"
383
+ raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
384
+
385
+
386
+ def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None:
387
+ """
388
+ Check attributes are equal. Both objects must have attribute.
389
+
390
+ Parameters
391
+ ----------
392
+ attr : str
393
+ Attribute name being compared.
394
+ left : object
395
+ right : object
396
+ obj : str, default 'Attributes'
397
+ Specify object name being compared, internally used to show appropriate
398
+ assertion message
399
+ """
400
+ __tracebackhide__ = True
401
+
402
+ left_attr = getattr(left, attr)
403
+ right_attr = getattr(right, attr)
404
+
405
+ if left_attr is right_attr or is_matching_na(left_attr, right_attr):
406
+ # e.g. both np.nan, both NaT, both pd.NA, ...
407
+ return None
408
+
409
+ try:
410
+ result = left_attr == right_attr
411
+ except TypeError:
412
+ # datetimetz on rhs may raise TypeError
413
+ result = False
414
+ if (left_attr is pd.NA) ^ (right_attr is pd.NA):
415
+ result = False
416
+ elif not isinstance(result, bool):
417
+ result = result.all()
418
+
419
+ if not result:
420
+ msg = f'Attribute "{attr}" are different'
421
+ raise_assert_detail(obj, msg, left_attr, right_attr)
422
+ return None
423
+
424
+
425
+ def assert_is_valid_plot_return_object(objs) -> None:
426
+ from matplotlib.artist import Artist
427
+ from matplotlib.axes import Axes
428
+
429
+ if isinstance(objs, (Series, np.ndarray)):
430
+ if isinstance(objs, Series):
431
+ objs = objs._values
432
+ for el in objs.ravel():
433
+ msg = (
434
+ "one of 'objs' is not a matplotlib Axes instance, "
435
+ f"type encountered {repr(type(el).__name__)}"
436
+ )
437
+ assert isinstance(el, (Axes, dict)), msg
438
+ else:
439
+ msg = (
440
+ "objs is neither an ndarray of Artist instances nor a single "
441
+ "ArtistArtist instance, tuple, or dict, 'objs' is a "
442
+ f"{repr(type(objs).__name__)}"
443
+ )
444
+ assert isinstance(objs, (Artist, tuple, dict)), msg
445
+
446
+
447
+ def assert_is_sorted(seq) -> None:
448
+ """Assert that the sequence is sorted."""
449
+ if isinstance(seq, (Index, Series)):
450
+ seq = seq.values
451
+ # sorting does not change precisions
452
+ if isinstance(seq, np.ndarray):
453
+ assert_numpy_array_equal(seq, np.sort(np.array(seq)))
454
+ else:
455
+ assert_extension_array_equal(seq, seq[seq.argsort()])
456
+
457
+
458
+ def assert_categorical_equal(
459
+ left,
460
+ right,
461
+ check_dtype: bool = True,
462
+ check_category_order: bool = True,
463
+ obj: str = "Categorical",
464
+ ) -> None:
465
+ """
466
+ Test that Categoricals are equivalent.
467
+
468
+ Parameters
469
+ ----------
470
+ left : Categorical
471
+ right : Categorical
472
+ check_dtype : bool, default True
473
+ Check that integer dtype of the codes are the same.
474
+ check_category_order : bool, default True
475
+ Whether the order of the categories should be compared, which
476
+ implies identical integer codes. If False, only the resulting
477
+ values are compared. The ordered attribute is
478
+ checked regardless.
479
+ obj : str, default 'Categorical'
480
+ Specify object name being compared, internally used to show appropriate
481
+ assertion message.
482
+ """
483
+ _check_isinstance(left, right, Categorical)
484
+
485
+ exact: bool | str
486
+ if isinstance(left.categories, RangeIndex) or isinstance(
487
+ right.categories, RangeIndex
488
+ ):
489
+ exact = "equiv"
490
+ else:
491
+ # We still want to require exact matches for Index
492
+ exact = True
493
+
494
+ if check_category_order:
495
+ assert_index_equal(
496
+ left.categories, right.categories, obj=f"{obj}.categories", exact=exact
497
+ )
498
+ assert_numpy_array_equal(
499
+ left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
500
+ )
501
+ else:
502
+ try:
503
+ lc = left.categories.sort_values()
504
+ rc = right.categories.sort_values()
505
+ except TypeError:
506
+ # e.g. '<' not supported between instances of 'int' and 'str'
507
+ lc, rc = left.categories, right.categories
508
+ assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact)
509
+ assert_index_equal(
510
+ left.categories.take(left.codes),
511
+ right.categories.take(right.codes),
512
+ obj=f"{obj}.values",
513
+ exact=exact,
514
+ )
515
+
516
+ assert_attr_equal("ordered", left, right, obj=obj)
517
+
518
+
519
+ def assert_interval_array_equal(
+     left, right, exact: bool | Literal["equiv"] = "equiv", obj: str = "IntervalArray"
+ ) -> None:
+     """
+     Test that two IntervalArrays are equivalent.
+ 
+     Parameters
+     ----------
+     left, right : IntervalArray
+         The IntervalArrays to compare.
+     exact : bool or {'equiv'}, default 'equiv'
+         Whether to check the Index class, dtype and inferred_type
+         are identical. If 'equiv', then RangeIndex can be substituted for
+         Index with an int64 dtype as well.
+     obj : str, default 'IntervalArray'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+     """
+     _check_isinstance(left, right, IntervalArray)
+ 
+     kwargs = {}
+     if left._left.dtype.kind in "mM":
+         # We have a DatetimeArray or TimedeltaArray
+         kwargs["check_freq"] = False
+ 
+     assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
+     assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
+ 
+     assert_attr_equal("closed", left, right, obj=obj)
+ 
+ 
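+ # Illustrative usage sketch (not part of the original module): comparing an
+ # IntervalArray with itself passes; mismatched endpoints raise via the
+ # .left/.right sub-checks above.
+ #
+ #   >>> arr = IntervalArray.from_breaks([0, 1, 2])
+ #   >>> assert_interval_array_equal(arr, arr)  # passes
+ 
+ 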
+ def assert_period_array_equal(left, right, obj: str = "PeriodArray") -> None:
+     _check_isinstance(left, right, PeriodArray)
+ 
+     assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray")
+     assert_attr_equal("dtype", left, right, obj=obj)
+ 
+ 
+ def assert_datetime_array_equal(
+     left, right, obj: str = "DatetimeArray", check_freq: bool = True
+ ) -> None:
+     __tracebackhide__ = True
+     _check_isinstance(left, right, DatetimeArray)
+ 
+     assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray")
+     if check_freq:
+         assert_attr_equal("freq", left, right, obj=obj)
+     assert_attr_equal("tz", left, right, obj=obj)
+ 
+ 
+ def assert_timedelta_array_equal(
+     left, right, obj: str = "TimedeltaArray", check_freq: bool = True
+ ) -> None:
+     __tracebackhide__ = True
+     _check_isinstance(left, right, TimedeltaArray)
+     assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray")
+     if check_freq:
+         assert_attr_equal("freq", left, right, obj=obj)
+ 
+ 
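+ # Illustrative usage sketch (not part of the original module; pandas is
+ # imported as pd in this module): both helpers compare the backing _ndarray
+ # plus freq (and tz for datetimes).
+ #
+ #   >>> dta = pd.array(pd.date_range("2020-01-01", periods=3, tz="UTC"))
+ #   >>> assert_datetime_array_equal(dta, dta)  # passes
+ 
+ 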
+ def raise_assert_detail(
+     obj, message, left, right, diff=None, first_diff=None, index_values=None
+ ) -> NoReturn:
+     __tracebackhide__ = True
+ 
+     msg = f"""{obj} are different
+ 
+ {message}"""
+ 
+     if isinstance(index_values, Index):
+         index_values = np.asarray(index_values)
+ 
+     if isinstance(index_values, np.ndarray):
+         msg += f"\n[index]: {pprint_thing(index_values)}"
+ 
+     if isinstance(left, np.ndarray):
+         left = pprint_thing(left)
+     elif isinstance(left, (CategoricalDtype, NumpyEADtype, StringDtype)):
+         left = repr(left)
+ 
+     if isinstance(right, np.ndarray):
+         right = pprint_thing(right)
+     elif isinstance(right, (CategoricalDtype, NumpyEADtype, StringDtype)):
+         right = repr(right)
+ 
+     msg += f"""
+ [left]: {left}
+ [right]: {right}"""
+ 
+     if diff is not None:
+         msg += f"\n[diff]: {diff}"
+ 
+     if first_diff is not None:
+         msg += f"\n{first_diff}"
+ 
+     raise AssertionError(msg)
+ 
+ 
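+ # Illustrative usage sketch (not part of the original module): this helper
+ # always raises, producing the standard multi-line failure message used
+ # throughout this file.
+ #
+ #   >>> raise_assert_detail("Series", "values are different", 1, 2)
+ #   Traceback (most recent call last):
+ #   ...
+ #   AssertionError: Series are different
+ 
+ 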
+ def assert_numpy_array_equal(
+     left,
+     right,
+     strict_nan: bool = False,
+     check_dtype: bool | Literal["equiv"] = True,
+     err_msg=None,
+     check_same=None,
+     obj: str = "numpy array",
+     index_values=None,
+ ) -> None:
+     """
+     Check that two 'np.ndarray' objects are equivalent.
+ 
+     Parameters
+     ----------
+     left, right : numpy.ndarray or iterable
+         The two arrays to be compared.
+     strict_nan : bool, default False
+         If True, consider NaN and None to be different.
+     check_dtype : bool, default True
+         Check dtype if both left and right are np.ndarray.
+     err_msg : str, default None
+         If provided, used as assertion message.
+     check_same : None|'copy'|'same', default None
+         Ensure left and right refer/do not refer to the same memory area.
+     obj : str, default 'numpy array'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+     index_values : Index | numpy.ndarray, default None
+         Optional index (shared by both left and right), used in output.
+     """
+     __tracebackhide__ = True
+ 
+     # instance validation
+     # Show a detailed error message when classes are different
+     assert_class_equal(left, right, obj=obj)
+     # both classes must be an np.ndarray
+     _check_isinstance(left, right, np.ndarray)
+ 
+     def _get_base(obj):
+         return obj.base if getattr(obj, "base", None) is not None else obj
+ 
+     left_base = _get_base(left)
+     right_base = _get_base(right)
+ 
+     if check_same == "same":
+         if left_base is not right_base:
+             raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
+     elif check_same == "copy":
+         if left_base is right_base:
+             raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
+ 
+     def _raise(left, right, err_msg) -> NoReturn:
+         if err_msg is None:
+             if left.shape != right.shape:
+                 raise_assert_detail(
+                     obj, f"{obj} shapes are different", left.shape, right.shape
+                 )
+ 
+             diff = 0
+             for left_arr, right_arr in zip(left, right):
+                 # count up differences
+                 if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
+                     diff += 1
+ 
+             diff = diff * 100.0 / left.size
+             msg = f"{obj} values are different ({np.round(diff, 5)} %)"
+             raise_assert_detail(obj, msg, left, right, index_values=index_values)
+ 
+         raise AssertionError(err_msg)
+ 
+     # compare shape and values
+     if not array_equivalent(left, right, strict_nan=strict_nan):
+         _raise(left, right, err_msg)
+ 
+     if check_dtype:
+         if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
+             assert_attr_equal("dtype", left, right, obj=obj)
+ 
+ 
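+ # Illustrative usage sketch (not part of the original module): identical
+ # values pass, and check_same="copy" additionally asserts the two arrays do
+ # not share memory.
+ #
+ #   >>> a = np.array([1, 2, 3])
+ #   >>> assert_numpy_array_equal(a, a.copy(), check_same="copy")  # passes
+ #   >>> assert_numpy_array_equal(a, a, check_same="copy")  # raises
+ 
+ 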
+ def assert_extension_array_equal(
+     left,
+     right,
+     check_dtype: bool | Literal["equiv"] = True,
+     index_values=None,
+     check_exact: bool | lib.NoDefault = lib.no_default,
+     rtol: float | lib.NoDefault = lib.no_default,
+     atol: float | lib.NoDefault = lib.no_default,
+     obj: str = "ExtensionArray",
+ ) -> None:
+     """
+     Check that left and right ExtensionArrays are equal.
+ 
+     Parameters
+     ----------
+     left, right : ExtensionArray
+         The two arrays to compare.
+     check_dtype : bool, default True
+         Whether to check if the ExtensionArray dtypes are identical.
+     index_values : Index | numpy.ndarray, default None
+         Optional index (shared by both left and right), used in output.
+     check_exact : bool, default False
+         Whether to compare numbers exactly.
+ 
+         .. versionchanged:: 2.2.0
+ 
+             Defaults to True for integer dtypes if none of
+             ``check_exact``, ``rtol`` and ``atol`` are specified.
+     rtol : float, default 1e-5
+         Relative tolerance. Only used when check_exact is False.
+     atol : float, default 1e-8
+         Absolute tolerance. Only used when check_exact is False.
+     obj : str, default 'ExtensionArray'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+ 
+         .. versionadded:: 2.0.0
+ 
+     Notes
+     -----
+     Missing values are checked separately from valid values.
+     A mask of missing values is computed for each and checked to match.
+     The remaining all-valid values are cast to object dtype and checked.
+ 
+     Examples
+     --------
+     >>> from pandas import testing as tm
+     >>> a = pd.Series([1, 2, 3, 4])
+     >>> b, c = a.array, a.array
+     >>> tm.assert_extension_array_equal(b, c)
+     """
+     if (
+         check_exact is lib.no_default
+         and rtol is lib.no_default
+         and atol is lib.no_default
+     ):
+         check_exact = (
+             is_numeric_dtype(left.dtype)
+             and not is_float_dtype(left.dtype)
+             or is_numeric_dtype(right.dtype)
+             and not is_float_dtype(right.dtype)
+         )
+     elif check_exact is lib.no_default:
+         check_exact = False
+ 
+     rtol = rtol if rtol is not lib.no_default else 1.0e-5
+     atol = atol if atol is not lib.no_default else 1.0e-8
+ 
+     assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
+     assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
+     if check_dtype:
+         assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
+ 
+     if (
+         isinstance(left, DatetimeLikeArrayMixin)
+         and isinstance(right, DatetimeLikeArrayMixin)
+         and type(right) == type(left)
+     ):
+         # GH 52449
+         if not check_dtype and left.dtype.kind in "mM":
+             if not isinstance(left.dtype, np.dtype):
+                 l_unit = cast(DatetimeTZDtype, left.dtype).unit
+             else:
+                 l_unit = np.datetime_data(left.dtype)[0]
+             if not isinstance(right.dtype, np.dtype):
+                 r_unit = cast(DatetimeTZDtype, right.dtype).unit
+             else:
+                 r_unit = np.datetime_data(right.dtype)[0]
+             if (
+                 l_unit != r_unit
+                 and compare_mismatched_resolutions(
+                     left._ndarray, right._ndarray, operator.eq
+                 ).all()
+             ):
+                 return
+         # Avoid slow object-dtype comparisons
+         # np.asarray for case where we have a np.MaskedArray
+         assert_numpy_array_equal(
+             np.asarray(left.asi8),
+             np.asarray(right.asi8),
+             index_values=index_values,
+             obj=obj,
+         )
+         return
+ 
+     left_na = np.asarray(left.isna())
+     right_na = np.asarray(right.isna())
+     assert_numpy_array_equal(
+         left_na, right_na, obj=f"{obj} NA mask", index_values=index_values
+     )
+ 
+     left_valid = left[~left_na].to_numpy(dtype=object)
+     right_valid = right[~right_na].to_numpy(dtype=object)
+     if check_exact:
+         assert_numpy_array_equal(
+             left_valid, right_valid, obj=obj, index_values=index_values
+         )
+     else:
+         _testing.assert_almost_equal(
+             left_valid,
+             right_valid,
+             check_dtype=bool(check_dtype),
+             rtol=rtol,
+             atol=atol,
+             obj=obj,
+             index_values=index_values,
+         )
+ 
+ 
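+ # Illustrative usage sketch (not part of the original module; pandas is
+ # imported as pd in this module): NA masks are compared first, then the valid
+ # values.
+ #
+ #   >>> left = pd.array([1, pd.NA, 3], dtype="Int64")
+ #   >>> right = pd.array([1, pd.NA, 3], dtype="Int64")
+ #   >>> assert_extension_array_equal(left, right)  # passes
+ 
+ 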
+ # This could be refactored to use the NDFrame.equals method
+ def assert_series_equal(
+     left,
+     right,
+     check_dtype: bool | Literal["equiv"] = True,
+     check_index_type: bool | Literal["equiv"] = "equiv",
+     check_series_type: bool = True,
+     check_names: bool = True,
+     check_exact: bool | lib.NoDefault = lib.no_default,
+     check_datetimelike_compat: bool = False,
+     check_categorical: bool = True,
+     check_category_order: bool = True,
+     check_freq: bool = True,
+     check_flags: bool = True,
+     rtol: float | lib.NoDefault = lib.no_default,
+     atol: float | lib.NoDefault = lib.no_default,
+     obj: str = "Series",
+     *,
+     check_index: bool = True,
+     check_like: bool = False,
+ ) -> None:
+     """
+     Check that left and right Series are equal.
+ 
+     Parameters
+     ----------
+     left : Series
+     right : Series
+     check_dtype : bool, default True
+         Whether to check the Series dtype is identical.
+     check_index_type : bool or {'equiv'}, default 'equiv'
+         Whether to check the Index class, dtype and inferred_type
+         are identical.
+     check_series_type : bool, default True
+         Whether to check the Series class is identical.
+     check_names : bool, default True
+         Whether to check the Series and Index names attribute.
+     check_exact : bool, default False
+         Whether to compare numbers exactly.
+ 
+         .. versionchanged:: 2.2.0
+ 
+             Defaults to True for integer dtypes if none of
+             ``check_exact``, ``rtol`` and ``atol`` are specified.
+     check_datetimelike_compat : bool, default False
+         Compare datetime-like which is comparable ignoring dtype.
+     check_categorical : bool, default True
+         Whether to compare internal Categorical exactly.
+     check_category_order : bool, default True
+         Whether to compare category order of internal Categoricals.
+     check_freq : bool, default True
+         Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
+     check_flags : bool, default True
+         Whether to check the `flags` attribute.
+     rtol : float, default 1e-5
+         Relative tolerance. Only used when check_exact is False.
+     atol : float, default 1e-8
+         Absolute tolerance. Only used when check_exact is False.
+     obj : str, default 'Series'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+     check_index : bool, default True
+         Whether to check index equivalence. If False, then compare only values.
+ 
+         .. versionadded:: 1.3.0
+     check_like : bool, default False
+         If True, ignore the order of the index. Must be False if check_index is False.
+         Note: the same labels must correspond to the same data.
+ 
+         .. versionadded:: 1.5.0
+ 
+     Examples
+     --------
+     >>> from pandas import testing as tm
+     >>> a = pd.Series([1, 2, 3, 4])
+     >>> b = pd.Series([1, 2, 3, 4])
+     >>> tm.assert_series_equal(a, b)
+     """
+     __tracebackhide__ = True
+     check_exact_index = False if check_exact is lib.no_default else check_exact
+     if (
+         check_exact is lib.no_default
+         and rtol is lib.no_default
+         and atol is lib.no_default
+     ):
+         check_exact = (
+             is_numeric_dtype(left.dtype)
+             and not is_float_dtype(left.dtype)
+             or is_numeric_dtype(right.dtype)
+             and not is_float_dtype(right.dtype)
+         )
+     elif check_exact is lib.no_default:
+         check_exact = False
+ 
+     rtol = rtol if rtol is not lib.no_default else 1.0e-5
+     atol = atol if atol is not lib.no_default else 1.0e-8
+ 
+     if not check_index and check_like:
+         raise ValueError("check_like must be False if check_index is False")
+ 
+     # instance validation
+     _check_isinstance(left, right, Series)
+ 
+     if check_series_type:
+         assert_class_equal(left, right, obj=obj)
+ 
+     # length comparison
+     if len(left) != len(right):
+         msg1 = f"{len(left)}, {left.index}"
+         msg2 = f"{len(right)}, {right.index}"
+         raise_assert_detail(obj, "Series length are different", msg1, msg2)
+ 
+     if check_flags:
+         assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
+ 
+     if check_index:
+         # GH #38183
+         assert_index_equal(
+             left.index,
+             right.index,
+             exact=check_index_type,
+             check_names=check_names,
+             check_exact=check_exact_index,
+             check_categorical=check_categorical,
+             check_order=not check_like,
+             rtol=rtol,
+             atol=atol,
+             obj=f"{obj}.index",
+         )
+ 
+     if check_like:
+         left = left.reindex_like(right)
+ 
+     if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
+         lidx = left.index
+         ridx = right.index
+         assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
+ 
+     if check_dtype:
+         # We want to skip exact dtype checking when `check_categorical`
+         # is False. We'll still raise if only one is a `Categorical`,
+         # regardless of `check_categorical`
+         if (
+             isinstance(left.dtype, CategoricalDtype)
+             and isinstance(right.dtype, CategoricalDtype)
+             and not check_categorical
+         ):
+             pass
+         else:
+             assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
+     if check_exact:
+         left_values = left._values
+         right_values = right._values
+         # Only check exact if dtype is numeric
+         if isinstance(left_values, ExtensionArray) and isinstance(
+             right_values, ExtensionArray
+         ):
+             assert_extension_array_equal(
+                 left_values,
+                 right_values,
+                 check_dtype=check_dtype,
+                 index_values=left.index,
+                 obj=str(obj),
+             )
+         else:
+             # convert both to NumPy arrays; if the dtypes had differed,
+             # check_dtype would have raised earlier
+             lv, rv = left_values, right_values
+             if isinstance(left_values, ExtensionArray):
+                 lv = left_values.to_numpy()
+             if isinstance(right_values, ExtensionArray):
+                 rv = right_values.to_numpy()
+             assert_numpy_array_equal(
+                 lv,
+                 rv,
+                 check_dtype=check_dtype,
+                 obj=str(obj),
+                 index_values=left.index,
+             )
+     elif check_datetimelike_compat and (
+         needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
+     ):
+         # we want to check only if we have compat dtypes
+         # e.g. integer and M|m are NOT compat, but we can simply check
+         # the values in that case
+ 
+         # datetimelike may have different objects (e.g. datetime.datetime
+         # vs Timestamp) but will compare equal
+         if not Index(left._values).equals(Index(right._values)):
+             msg = (
+                 f"[datetimelike_compat=True] {left._values} "
+                 f"is not equal to {right._values}."
+             )
+             raise AssertionError(msg)
+     elif isinstance(left.dtype, IntervalDtype) and isinstance(
+         right.dtype, IntervalDtype
+     ):
+         assert_interval_array_equal(left.array, right.array)
+     elif isinstance(left.dtype, CategoricalDtype) or isinstance(
+         right.dtype, CategoricalDtype
+     ):
+         _testing.assert_almost_equal(
+             left._values,
+             right._values,
+             rtol=rtol,
+             atol=atol,
+             check_dtype=bool(check_dtype),
+             obj=str(obj),
+             index_values=left.index,
+         )
+     elif isinstance(left.dtype, ExtensionDtype) and isinstance(
+         right.dtype, ExtensionDtype
+     ):
+         assert_extension_array_equal(
+             left._values,
+             right._values,
+             rtol=rtol,
+             atol=atol,
+             check_dtype=check_dtype,
+             index_values=left.index,
+             obj=str(obj),
+         )
+     elif is_extension_array_dtype_and_needs_i8_conversion(
+         left.dtype, right.dtype
+     ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
+         assert_extension_array_equal(
+             left._values,
+             right._values,
+             check_dtype=check_dtype,
+             index_values=left.index,
+             obj=str(obj),
+         )
+     elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
+         # DatetimeArray or TimedeltaArray
+         assert_extension_array_equal(
+             left._values,
+             right._values,
+             check_dtype=check_dtype,
+             index_values=left.index,
+             obj=str(obj),
+         )
+     else:
+         _testing.assert_almost_equal(
+             left._values,
+             right._values,
+             rtol=rtol,
+             atol=atol,
+             check_dtype=bool(check_dtype),
+             obj=str(obj),
+             index_values=left.index,
+         )
+ 
+     # metadata comparison
+     if check_names:
+         assert_attr_equal("name", left, right, obj=obj)
+ 
+     if check_categorical:
+         if isinstance(left.dtype, CategoricalDtype) or isinstance(
+             right.dtype, CategoricalDtype
+         ):
+             assert_categorical_equal(
+                 left._values,
+                 right._values,
+                 obj=f"{obj} category",
+                 check_category_order=check_category_order,
+             )
+ 
+ 
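+ # Illustrative usage sketch (not part of the original module; pandas is
+ # imported as pd in this module): floats are compared approximately by
+ # default, while check_dtype=False tolerates an int64/float64 mismatch.
+ #
+ #   >>> assert_series_equal(pd.Series([0.1]), pd.Series([0.1 + 1e-9]))  # passes
+ #   >>> assert_series_equal(pd.Series([1]), pd.Series([1.0]), check_dtype=False)
+ 
+ 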
+ # This could be refactored to use the NDFrame.equals method
+ def assert_frame_equal(
+     left,
+     right,
+     check_dtype: bool | Literal["equiv"] = True,
+     check_index_type: bool | Literal["equiv"] = "equiv",
+     check_column_type: bool | Literal["equiv"] = "equiv",
+     check_frame_type: bool = True,
+     check_names: bool = True,
+     by_blocks: bool = False,
+     check_exact: bool | lib.NoDefault = lib.no_default,
+     check_datetimelike_compat: bool = False,
+     check_categorical: bool = True,
+     check_like: bool = False,
+     check_freq: bool = True,
+     check_flags: bool = True,
+     rtol: float | lib.NoDefault = lib.no_default,
+     atol: float | lib.NoDefault = lib.no_default,
+     obj: str = "DataFrame",
+ ) -> None:
+     """
+     Check that left and right DataFrame are equal.
+ 
+     This function is intended to compare two DataFrames and output any
+     differences. It is mostly intended for use in unit tests.
+     Additional parameters allow varying the strictness of the
+     equality checks performed.
+ 
+     Parameters
+     ----------
+     left : DataFrame
+         First DataFrame to compare.
+     right : DataFrame
+         Second DataFrame to compare.
+     check_dtype : bool, default True
+         Whether to check the DataFrame dtype is identical.
+     check_index_type : bool or {'equiv'}, default 'equiv'
+         Whether to check the Index class, dtype and inferred_type
+         are identical.
+     check_column_type : bool or {'equiv'}, default 'equiv'
+         Whether to check the columns class, dtype and inferred_type
+         are identical. Is passed as the ``exact`` argument of
+         :func:`assert_index_equal`.
+     check_frame_type : bool, default True
+         Whether to check the DataFrame class is identical.
+     check_names : bool, default True
+         Whether to check that the `names` attribute for both the `index`
+         and `column` attributes of the DataFrame is identical.
+     by_blocks : bool, default False
+         Specify how to compare internal data. If False, compare by columns.
+         If True, compare by blocks.
+     check_exact : bool, default False
+         Whether to compare numbers exactly.
+ 
+         .. versionchanged:: 2.2.0
+ 
+             Defaults to True for integer dtypes if none of
+             ``check_exact``, ``rtol`` and ``atol`` are specified.
+     check_datetimelike_compat : bool, default False
+         Compare datetime-like which is comparable ignoring dtype.
+     check_categorical : bool, default True
+         Whether to compare internal Categorical exactly.
+     check_like : bool, default False
+         If True, ignore the order of index & columns.
+         Note: index labels must match their respective rows
+         (same as in columns) - the same labels must correspond to the same data.
+     check_freq : bool, default True
+         Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
+     check_flags : bool, default True
+         Whether to check the `flags` attribute.
+     rtol : float, default 1e-5
+         Relative tolerance. Only used when check_exact is False.
+     atol : float, default 1e-8
+         Absolute tolerance. Only used when check_exact is False.
+     obj : str, default 'DataFrame'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+ 
+     See Also
+     --------
+     assert_series_equal : Equivalent method for asserting Series equality.
+     DataFrame.equals : Check DataFrame equality.
+ 
+     Examples
+     --------
+     This example shows comparing two DataFrames that are equal
+     but with columns of differing dtypes.
+ 
+     >>> from pandas.testing import assert_frame_equal
+     >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
+     >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
+ 
+     df1 equals itself.
+ 
+     >>> assert_frame_equal(df1, df1)
+ 
+     df1 differs from df2 as column 'b' is of a different type.
+ 
+     >>> assert_frame_equal(df1, df2)
+     Traceback (most recent call last):
+     ...
+     AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
+ 
+     Attribute "dtype" are different
+     [left]: int64
+     [right]: float64
+ 
+     Ignore differing dtypes in columns with check_dtype.
+ 
+     >>> assert_frame_equal(df1, df2, check_dtype=False)
+     """
+     __tracebackhide__ = True
+     _rtol = rtol if rtol is not lib.no_default else 1.0e-5
+     _atol = atol if atol is not lib.no_default else 1.0e-8
+     _check_exact = check_exact if check_exact is not lib.no_default else False
+ 
+     # instance validation
+     _check_isinstance(left, right, DataFrame)
+ 
+     if check_frame_type:
+         assert isinstance(left, type(right))
+         # assert_class_equal(left, right, obj=obj)
+ 
+     # shape comparison
+     if left.shape != right.shape:
+         raise_assert_detail(
+             obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
+         )
+ 
+     if check_flags:
+         assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
+ 
+     # index comparison
+     assert_index_equal(
+         left.index,
+         right.index,
+         exact=check_index_type,
+         check_names=check_names,
+         check_exact=_check_exact,
+         check_categorical=check_categorical,
+         check_order=not check_like,
+         rtol=_rtol,
+         atol=_atol,
+         obj=f"{obj}.index",
+     )
+ 
+     # column comparison
+     assert_index_equal(
+         left.columns,
+         right.columns,
+         exact=check_column_type,
+         check_names=check_names,
+         check_exact=_check_exact,
+         check_categorical=check_categorical,
+         check_order=not check_like,
+         rtol=_rtol,
+         atol=_atol,
+         obj=f"{obj}.columns",
+     )
+ 
+     if check_like:
+         left = left.reindex_like(right)
+ 
+     # compare by blocks
+     if by_blocks:
+         rblocks = right._to_dict_of_blocks()
+         lblocks = left._to_dict_of_blocks()
+         for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
+             assert dtype in lblocks
+             assert dtype in rblocks
+             assert_frame_equal(
+                 lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
+             )
+ 
+     # compare by columns
+     else:
+         for i, col in enumerate(left.columns):
+             # We have already checked that columns match, so we can do
+             # fast location-based lookups
+             lcol = left._ixs(i, axis=1)
+             rcol = right._ixs(i, axis=1)
+ 
+             # GH #38183
+             # use check_index=False, because we do not want to run
+             # assert_index_equal for each column,
+             # as we already checked it for the whole dataframe before.
+             assert_series_equal(
+                 lcol,
+                 rcol,
+                 check_dtype=check_dtype,
+                 check_index_type=check_index_type,
+                 check_exact=check_exact,
+                 check_names=check_names,
+                 check_datetimelike_compat=check_datetimelike_compat,
+                 check_categorical=check_categorical,
+                 check_freq=check_freq,
+                 obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
+                 rtol=rtol,
+                 atol=atol,
+                 check_index=False,
+                 check_flags=False,
+             )
+ 
+ 
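+ # Illustrative usage sketch (not part of the original module; pandas is
+ # imported as pd in this module): check_like=True tolerates reordered rows
+ # and columns as long as the same labels carry the same data.
+ #
+ #   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
+ #   >>> assert_frame_equal(df, df.loc[["y", "x"], ["b", "a"]], check_like=True)
+ 
+ 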
+ def assert_equal(left, right, **kwargs) -> None:
+     """
+     Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
+ 
+     Parameters
+     ----------
+     left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
+         The two items to be compared.
+     **kwargs
+         All keyword arguments are passed through to the underlying assert method.
+     """
+     __tracebackhide__ = True
+ 
+     if isinstance(left, Index):
+         assert_index_equal(left, right, **kwargs)
+         if isinstance(left, (DatetimeIndex, TimedeltaIndex)):
+             assert left.freq == right.freq, (left.freq, right.freq)
+     elif isinstance(left, Series):
+         assert_series_equal(left, right, **kwargs)
+     elif isinstance(left, DataFrame):
+         assert_frame_equal(left, right, **kwargs)
+     elif isinstance(left, IntervalArray):
+         assert_interval_array_equal(left, right, **kwargs)
+     elif isinstance(left, PeriodArray):
+         assert_period_array_equal(left, right, **kwargs)
+     elif isinstance(left, DatetimeArray):
+         assert_datetime_array_equal(left, right, **kwargs)
+     elif isinstance(left, TimedeltaArray):
+         assert_timedelta_array_equal(left, right, **kwargs)
+     elif isinstance(left, ExtensionArray):
+         assert_extension_array_equal(left, right, **kwargs)
+     elif isinstance(left, np.ndarray):
+         assert_numpy_array_equal(left, right, **kwargs)
+     elif isinstance(left, str):
+         assert kwargs == {}
+         assert left == right
+     else:
+         assert kwargs == {}
+         assert_almost_equal(left, right)
+ 
+ 
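+ # Illustrative usage sketch (not part of the original module): dispatch is
+ # driven purely by the type of `left`.
+ #
+ #   >>> assert_equal(np.array([1, 2]), np.array([1, 2]))    # numpy path
+ #   >>> assert_equal(pd.Series([1, 2]), pd.Series([1, 2]))  # Series path
+ 
+ 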
+ def assert_sp_array_equal(left, right) -> None:
+     """
+     Check that the left and right SparseArray are equal.
+ 
+     Parameters
+     ----------
+     left : SparseArray
+     right : SparseArray
+     """
+     _check_isinstance(left, right, pd.arrays.SparseArray)
+ 
+     assert_numpy_array_equal(left.sp_values, right.sp_values)
+ 
+     # SparseIndex comparison
+     assert isinstance(left.sp_index, SparseIndex)
+     assert isinstance(right.sp_index, SparseIndex)
+ 
+     left_index = left.sp_index
+     right_index = right.sp_index
+ 
+     if not left_index.equals(right_index):
+         raise_assert_detail(
+             "SparseArray.index", "index are not equal", left_index, right_index
+         )
+     else:
+         # indexes are equal; nothing further to check here
+         pass
+ 
+     assert_attr_equal("fill_value", left, right)
+     assert_attr_equal("dtype", left, right)
+     assert_numpy_array_equal(left.to_dense(), right.to_dense())
+ 
+ 
+ def assert_contains_all(iterable, dic) -> None:
+     for k in iterable:
+         assert k in dic, f"Did not contain item: {repr(k)}"
+ 
+ 
+ def assert_copy(iter1, iter2, **eql_kwargs) -> None:
+     """
+     iter1, iter2: iterables that produce elements
+     comparable with assert_almost_equal
+ 
+     Checks that the elements are equal, but not
+     the same object. (Does not check that items
+     in sequences are also not the same object)
+     """
+     for elem1, elem2 in zip(iter1, iter2):
+         assert_almost_equal(elem1, elem2, **eql_kwargs)
+         msg = (
+             f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
+             "different objects, but they were the same object."
+         )
+         assert elem1 is not elem2, msg
+ 
+ 
+ def is_extension_array_dtype_and_needs_i8_conversion(
+     left_dtype: DtypeObj, right_dtype: DtypeObj
+ ) -> bool:
+     """
+     Checks that we have the combination of an ExtensionArray dtype and
+     a dtype that should be converted to int64.
+ 
+     Returns
+     -------
+     bool
+ 
+     Related to issue #37609
+     """
+     return isinstance(left_dtype, ExtensionDtype) and needs_i8_conversion(right_dtype)
+ 
+ 
+ def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None:
+     """
+     Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable,
+     ser[l_slc].
+     """
+     expected = ser.iloc[i_slc]
+ 
+     assert_series_equal(ser.loc[l_slc], expected)
+ 
+     if not is_integer_dtype(ser.index):
+         # For integer indices, plain getitem (ser[l_slc]) is position-based
+         # rather than label-based, so it is only checked for non-integer indexes.
+         assert_series_equal(ser[l_slc], expected)
+ 
+ 
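+ # Illustrative usage sketch (not part of the original module; pandas is
+ # imported as pd in this module): the label slice "a":"b" must select the
+ # same rows as the positional slice 0:2.
+ #
+ #   >>> ser = pd.Series([10, 20, 30], index=["a", "b", "c"])
+ #   >>> assert_indexing_slices_equivalent(ser, slice("a", "b"), slice(0, 2))
+ 
+ 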
+ def assert_metadata_equivalent(
+     left: DataFrame | Series, right: DataFrame | Series | None = None
+ ) -> None:
+     """
+     Check that ._metadata attributes are equivalent.
+     """
+     for attr in left._metadata:
+         val = getattr(left, attr, None)
+         if right is None:
+             assert val is None
+         else:
+             assert val == getattr(right, attr, None)
env-llmeval/lib/python3.10/site-packages/pandas/_testing/compat.py ADDED
@@ -0,0 +1,29 @@
+ """
+ Helpers for sharing tests between DataFrame/Series
+ """
+ from __future__ import annotations
+ 
+ from typing import TYPE_CHECKING
+ 
+ from pandas import DataFrame
+ 
+ if TYPE_CHECKING:
+     from pandas._typing import DtypeObj
+ 
+ 
+ def get_dtype(obj) -> DtypeObj:
+     if isinstance(obj, DataFrame):
+         # Note: we are assuming only one column
+         return obj.dtypes.iat[0]
+     else:
+         return obj.dtype
+ 
+ 
+ def get_obj(df: DataFrame, klass):
+     """
+     For sharing tests using frame_or_series, either return the DataFrame
+     unchanged or return its first column as a Series.
+     """
+     if klass is DataFrame:
+         return df
+     return df._ixs(0, axis=1)
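+ # Illustrative usage sketch (not part of the original module; assumes pandas
+ # imported as pd): the same test body can exercise both a DataFrame and its
+ # first column as a Series.
+ #
+ #   >>> df = pd.DataFrame({"a": [1, 2]})
+ #   >>> get_obj(df, pd.DataFrame) is df
+ #   True
+ #   >>> get_obj(df, pd.Series).name
+ #   'a'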
env-llmeval/lib/python3.10/site-packages/pandas/_testing/contexts.py ADDED
@@ -0,0 +1,257 @@
+ from __future__ import annotations
+ 
+ from contextlib import contextmanager
+ import os
+ from pathlib import Path
+ import tempfile
+ from typing import (
+     IO,
+     TYPE_CHECKING,
+     Any,
+ )
+ import uuid
+ 
+ from pandas._config import using_copy_on_write
+ 
+ from pandas.compat import PYPY
+ from pandas.errors import ChainedAssignmentError
+ 
+ from pandas import set_option
+ 
+ from pandas.io.common import get_handle
+ 
+ if TYPE_CHECKING:
+     from collections.abc import Generator
+ 
+     from pandas._typing import (
+         BaseBuffer,
+         CompressionOptions,
+         FilePath,
+     )
+ 
+ 
+ @contextmanager
+ def decompress_file(
+     path: FilePath | BaseBuffer, compression: CompressionOptions
+ ) -> Generator[IO[bytes], None, None]:
+     """
+     Open a compressed file and return a file object.
+ 
+     Parameters
+     ----------
+     path : str
+         The path where the file is read from.
+ 
+     compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
+         Name of the decompression to use
+ 
+     Returns
+     -------
+     file object
+     """
+     with get_handle(path, "rb", compression=compression, is_text=False) as handle:
+         yield handle.handle
+ 
+ 
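+ # Illustrative usage sketch (not part of the original module; "data.csv.gz"
+ # is a hypothetical path): read raw bytes out of a gzip-compressed file.
+ #
+ #   >>> with decompress_file("data.csv.gz", compression="gzip") as fh:
+ #   ...     raw = fh.read()
+ 
+ 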
+ @contextmanager
+ def set_timezone(tz: str) -> Generator[None, None, None]:
+     """
+     Context manager for temporarily setting a timezone.
+ 
+     Parameters
+     ----------
+     tz : str
+         A string representing a valid timezone.
+ 
+     Examples
+     --------
+     >>> from datetime import datetime
+     >>> from dateutil.tz import tzlocal
+     >>> tzlocal().tzname(datetime(2021, 1, 1))  # doctest: +SKIP
+     'IST'
+ 
+     >>> with set_timezone('US/Eastern'):
+     ...     tzlocal().tzname(datetime(2021, 1, 1))
+     ...
+     'EST'
+     """
+     import time
+ 
+     def setTZ(tz) -> None:
+         if tz is None:
+             try:
+                 del os.environ["TZ"]
+             except KeyError:
+                 pass
+         else:
+             os.environ["TZ"] = tz
+         time.tzset()
+ 
+     orig_tz = os.environ.get("TZ")
+     setTZ(tz)
+     try:
+         yield
+     finally:
+         setTZ(orig_tz)
+ 
+ 
+ @contextmanager
+ def ensure_clean(
+     filename=None, return_filelike: bool = False, **kwargs: Any
+ ) -> Generator[Any, None, None]:
+     """
+     Gets a temporary path and agrees to remove on close.
+ 
+     This implementation does not use tempfile.mkstemp to avoid having a file handle.
+     If the code using the returned path wants to delete the file itself, Windows
+     requires that no program has a file handle to it.
+ 
+     Parameters
+     ----------
+     filename : str (optional)
+         suffix of the created file.
+     return_filelike : bool (default False)
+         if True, returns a file-like which is *always* cleaned. Necessary for
+         savefig and other functions which want to append extensions.
+     **kwargs
+         Additional keywords are passed to open().
+ 
+     """
+     folder = Path(tempfile.gettempdir())
+ 
+     if filename is None:
+         filename = ""
+     filename = str(uuid.uuid4()) + filename
+     path = folder / filename
+ 
+     path.touch()
+ 
+     handle_or_str: str | IO = str(path)
+     encoding = kwargs.pop("encoding", None)
+     if return_filelike:
+         kwargs.setdefault("mode", "w+b")
+         if encoding is None and "b" not in kwargs["mode"]:
+             encoding = "utf-8"
+         handle_or_str = open(path, encoding=encoding, **kwargs)
+ 
+     try:
+         yield handle_or_str
+     finally:
+         if not isinstance(handle_or_str, str):
+             handle_or_str.close()
+         if path.is_file():
+             path.unlink()
+ 
+ 
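+ # Illustrative usage sketch (not part of the original module; assumes pandas
+ # imported as pd): the yielded value is a unique temp-file path string that
+ # is unlinked on exit.
+ #
+ #   >>> with ensure_clean(".csv") as path:
+ #   ...     pd.DataFrame({"a": [1]}).to_csv(path)
+ 
+ 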
+ @contextmanager
+ def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]:
+     """
+     Context manager to temporarily register a CSV dialect for parsing CSV.
+ 
+     Parameters
+     ----------
+     name : str
+         The name of the dialect.
+     kwargs : mapping
+         The parameters for the dialect.
+ 
+     Raises
+     ------
+     ValueError : the name of the dialect conflicts with a builtin one.
+ 
+     See Also
+     --------
+     csv : Python's CSV library.
+     """
+     import csv
+ 
+     _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
+ 
+     if name in _BUILTIN_DIALECTS:
+         raise ValueError("Cannot override builtin dialect.")
+ 
+     csv.register_dialect(name, **kwargs)
+     try:
+         yield
+     finally:
+         csv.unregister_dialect(name)
+ 
+ 
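+ # Illustrative usage sketch (not part of the original module; "data.txt" is
+ # a hypothetical file): register a pipe-delimited dialect just for the block.
+ #
+ #   >>> import pandas as pd
+ #   >>> with with_csv_dialect("pipes", delimiter="|"):
+ #   ...     df = pd.read_csv("data.txt", dialect="pipes")
+ 
+ 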
+ @contextmanager
+ def use_numexpr(use, min_elements=None) -> Generator[None, None, None]:
+     from pandas.core.computation import expressions as expr
+ 
+     if min_elements is None:
+         min_elements = expr._MIN_ELEMENTS
+ 
+     olduse = expr.USE_NUMEXPR
+     oldmin = expr._MIN_ELEMENTS
+     set_option("compute.use_numexpr", use)
+     expr._MIN_ELEMENTS = min_elements
+     try:
+         yield
+     finally:
+         expr._MIN_ELEMENTS = oldmin
+         set_option("compute.use_numexpr", olduse)
+ 
+ 
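+ # Illustrative usage sketch (not part of the original module; assumes pandas
+ # imported as pd and numexpr installed): force the numexpr path even for a
+ # tiny frame by lowering the element threshold.
+ #
+ #   >>> with use_numexpr(True, min_elements=0):
+ #   ...     result = pd.DataFrame({"a": [1, 2]}) + 1
+ 
+ 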
+ def raises_chained_assignment_error(warn=True, extra_warnings=(), extra_match=()):
+     from pandas._testing import assert_produces_warning
+ 
+     if not warn:
+         from contextlib import nullcontext
+ 
+         return nullcontext()
+ 
+     if PYPY and not extra_warnings:
+         from contextlib import nullcontext
+ 
+         return nullcontext()
+     elif PYPY and extra_warnings:
+         return assert_produces_warning(
+             extra_warnings,
+             match="|".join(extra_match),
+         )
+     else:
+         if using_copy_on_write():
+             warning = ChainedAssignmentError
+             match = (
+                 "A value is trying to be set on a copy of a DataFrame or Series "
+                 "through chained assignment"
+             )
+         else:
+             warning = FutureWarning  # type: ignore[assignment]
+             # TODO update match
+             match = "ChainedAssignmentError"
+         if extra_warnings:
+             warning = (warning, *extra_warnings)  # type: ignore[assignment]
+         return assert_produces_warning(
+             warning,
+             match="|".join((match, *extra_match)),
+         )
+ 
+ 
+ def assert_cow_warning(warn=True, match=None, **kwargs):
+     """
+     Assert that a warning is raised in the CoW warning mode.
+ 
+     Parameters
+     ----------
+     warn : bool, default True
+         By default, check that a warning is raised. Can be turned off by passing False.
+     match : str
+         The warning message to match against, if different from the default.
+     kwargs
+         Passed through to assert_produces_warning
+     """
+     from pandas._testing import assert_produces_warning
+ 
+     if not warn:
+         from contextlib import nullcontext
+ 
+         return nullcontext()
+ 
+     if not match:
+         match = "Setting a value on a view"
+ 
+     return assert_produces_warning(FutureWarning, match=match, **kwargs)
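+ 
+ 
+ # Illustrative usage sketch (not part of the original module): wrap code that
+ # is expected to emit the Copy-on-Write FutureWarning; `ser` is hypothetical.
+ #
+ #   >>> with assert_cow_warning():
+ #   ...     ser[0:2] = 10  # would warn under the CoW warning mode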