applied-ai-018 committed
Commit 6590dfb · verified · 1 Parent(s): 5cc660e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/accessor.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/algorithms.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/api.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/apply.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/arraylike.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/base.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/config_init.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/construction.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/flags.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/generic.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/indexing.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/missing.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/nanops.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/resample.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/sample.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/sorting.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/core/array_algos/take.py +594 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/check.py +8 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/common.py +48 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/ops.py +621 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/parsing.py +198 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/scope.py +355 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/core/strings/base.py +262 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/__init__.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/datetimes.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/numeric.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/timedeltas.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/times.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/core/tools/datetimes.py +1235 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/core/tools/numeric.py +329 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__init__.py +23 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/__init__.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/common.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/doc.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/ewm.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/expanding.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/numba_.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/online.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/accessor.cpython-310.pyc ADDED
Binary file (11.2 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/algorithms.cpython-310.pyc ADDED
Binary file (39.6 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/api.cpython-310.pyc ADDED
Binary file (2.62 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/apply.cpython-310.pyc ADDED
Binary file (49.8 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/arraylike.cpython-310.pyc ADDED
Binary file (14.4 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/base.cpython-310.pyc ADDED
Binary file (37.8 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/config_init.cpython-310.pyc ADDED
Binary file (20.7 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/construction.cpython-310.pyc ADDED
Binary file (19.7 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/flags.cpython-310.pyc ADDED
Binary file (4.34 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython-310.pyc ADDED
Binary file (363 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/generic.cpython-310.pyc ADDED
Binary file (386 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/indexing.cpython-310.pyc ADDED
Binary file (68.7 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/missing.cpython-310.pyc ADDED
Binary file (26.6 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/nanops.cpython-310.pyc ADDED
Binary file (37 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/resample.cpython-310.pyc ADDED
Binary file (74.8 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/sample.cpython-310.pyc ADDED
Binary file (3.96 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc ADDED
Binary file (176 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/__pycache__/sorting.cpython-310.pyc ADDED
Binary file (19.9 kB)
 
env-llmeval/lib/python3.10/site-packages/pandas/core/array_algos/take.py ADDED
@@ -0,0 +1,594 @@
+ from __future__ import annotations
+
+ import functools
+ from typing import (
+     TYPE_CHECKING,
+     cast,
+     overload,
+ )
+
+ import numpy as np
+
+ from pandas._libs import (
+     algos as libalgos,
+     lib,
+ )
+
+ from pandas.core.dtypes.cast import maybe_promote
+ from pandas.core.dtypes.common import (
+     ensure_platform_int,
+     is_1d_only_ea_dtype,
+ )
+ from pandas.core.dtypes.missing import na_value_for_dtype
+
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         ArrayLike,
+         AxisInt,
+         npt,
+     )
+
+     from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+     from pandas.core.arrays.base import ExtensionArray
+
+
+ @overload
+ def take_nd(
+     arr: np.ndarray,
+     indexer,
+     axis: AxisInt = ...,
+     fill_value=...,
+     allow_fill: bool = ...,
+ ) -> np.ndarray:
+     ...
+
+
+ @overload
+ def take_nd(
+     arr: ExtensionArray,
+     indexer,
+     axis: AxisInt = ...,
+     fill_value=...,
+     allow_fill: bool = ...,
+ ) -> ArrayLike:
+     ...
+
+
+ def take_nd(
+     arr: ArrayLike,
+     indexer,
+     axis: AxisInt = 0,
+     fill_value=lib.no_default,
+     allow_fill: bool = True,
+ ) -> ArrayLike:
+     """
+     Specialized Cython take which sets NaN values in one pass
+
+     This dispatches to ``take`` defined on ExtensionArrays.
+
+     Note: this function assumes that the indexer is a valid(ated) indexer with
+     no out of bound indices.
+
+     Parameters
+     ----------
+     arr : np.ndarray or ExtensionArray
+         Input array.
+     indexer : ndarray
+         1-D array of indices to take, subarrays corresponding to -1 value
+         indices are filled with fill_value
+     axis : int, default 0
+         Axis to take from
+     fill_value : any, default np.nan
+         Fill value to replace -1 values with
+     allow_fill : bool, default True
+         If False, indexer is assumed to contain no -1 values so no filling
+         will be done. This short-circuits computation of a mask. Result is
+         undefined if allow_fill == False and -1 is present in indexer.
+
+     Returns
+     -------
+     subarray : np.ndarray or ExtensionArray
+         May be the same type as the input, or cast to an ndarray.
+     """
+     if fill_value is lib.no_default:
+         fill_value = na_value_for_dtype(arr.dtype, compat=False)
+     elif lib.is_np_dtype(arr.dtype, "mM"):
+         dtype, fill_value = maybe_promote(arr.dtype, fill_value)
+         if arr.dtype != dtype:
+             # EA.take is strict about returning a new object of the same type
+             # so for that case cast upfront
+             arr = arr.astype(dtype)
+
+     if not isinstance(arr, np.ndarray):
+         # i.e. ExtensionArray,
+         # includes for EA to catch DatetimeArray, TimedeltaArray
+         if not is_1d_only_ea_dtype(arr.dtype):
+             # i.e. DatetimeArray, TimedeltaArray
+             arr = cast("NDArrayBackedExtensionArray", arr)
+             return arr.take(
+                 indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis
+             )
+
+         return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
+
+     arr = np.asarray(arr)
+     return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
+
+
+ def _take_nd_ndarray(
+     arr: np.ndarray,
+     indexer: npt.NDArray[np.intp] | None,
+     axis: AxisInt,
+     fill_value,
+     allow_fill: bool,
+ ) -> np.ndarray:
+     if indexer is None:
+         indexer = np.arange(arr.shape[axis], dtype=np.intp)
+         dtype, fill_value = arr.dtype, arr.dtype.type()
+     else:
+         indexer = ensure_platform_int(indexer)
+
+     dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
+         arr, indexer, fill_value, allow_fill
+     )
+
+     flip_order = False
+     if arr.ndim == 2 and arr.flags.f_contiguous:
+         flip_order = True
+
+     if flip_order:
+         arr = arr.T
+         axis = arr.ndim - axis - 1
+
+     # at this point, it's guaranteed that dtype can hold both the arr values
+     # and the fill_value
+     out_shape_ = list(arr.shape)
+     out_shape_[axis] = len(indexer)
+     out_shape = tuple(out_shape_)
+     if arr.flags.f_contiguous and axis == arr.ndim - 1:
+         # minor tweak that can make an order-of-magnitude difference
+         # for dataframes initialized directly from 2-d ndarrays
+         # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
+         # f-contiguous transpose)
+         out = np.empty(out_shape, dtype=dtype, order="F")
+     else:
+         out = np.empty(out_shape, dtype=dtype)
+
+     func = _get_take_nd_function(
+         arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
+     )
+     func(arr, indexer, out, fill_value)
+
+     if flip_order:
+         out = out.T
+     return out
+
+
+ def take_1d(
+     arr: ArrayLike,
+     indexer: npt.NDArray[np.intp],
+     fill_value=None,
+     allow_fill: bool = True,
+     mask: npt.NDArray[np.bool_] | None = None,
+ ) -> ArrayLike:
+     """
+     Specialized version for 1D arrays. Differences compared to `take_nd`:
+
+     - Assumes input array has already been converted to numpy array / EA
+     - Assumes indexer is already guaranteed to be intp dtype ndarray
+     - Only works for 1D arrays
+
+     To ensure the lowest possible overhead.
+
+     Note: similarly to `take_nd`, this function assumes that the indexer is
+     a valid(ated) indexer with no out of bound indices.
+
+     Parameters
+     ----------
+     arr : np.ndarray or ExtensionArray
+         Input array.
+     indexer : ndarray
+         1-D array of indices to take (validated indices, intp dtype).
+     fill_value : any, default np.nan
+         Fill value to replace -1 values with
+     allow_fill : bool, default True
+         If False, indexer is assumed to contain no -1 values so no filling
+         will be done. This short-circuits computation of a mask. Result is
+         undefined if allow_fill == False and -1 is present in indexer.
+     mask : np.ndarray, optional, default None
+         If `allow_fill` is True, and the mask (where indexer == -1) is already
+         known, it can be passed to avoid recomputation.
+     """
+     if not isinstance(arr, np.ndarray):
+         # ExtensionArray -> dispatch to their method
+         return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
+
+     if not allow_fill:
+         return arr.take(indexer)
+
+     dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
+         arr, indexer, fill_value, True, mask
+     )
+
+     # at this point, it's guaranteed that dtype can hold both the arr values
+     # and the fill_value
+     out = np.empty(indexer.shape, dtype=dtype)
+
+     func = _get_take_nd_function(
+         arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info
+     )
+     func(arr, indexer, out, fill_value)
+
+     return out
+
+
+ def take_2d_multi(
+     arr: np.ndarray,
+     indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+     fill_value=np.nan,
+ ) -> np.ndarray:
+     """
+     Specialized Cython take which sets NaN values in one pass.
+     """
+     # This is only called from one place in DataFrame._reindex_multi,
+     # so we know indexer is well-behaved.
+     assert indexer is not None
+     assert indexer[0] is not None
+     assert indexer[1] is not None
+
+     row_idx, col_idx = indexer
+
+     row_idx = ensure_platform_int(row_idx)
+     col_idx = ensure_platform_int(col_idx)
+     indexer = row_idx, col_idx
+     mask_info = None
+
+     # check for promotion based on types only (do this first because
+     # it's faster than computing a mask)
+     dtype, fill_value = maybe_promote(arr.dtype, fill_value)
+     if dtype != arr.dtype:
+         # check if promotion is actually required based on indexer
+         row_mask = row_idx == -1
+         col_mask = col_idx == -1
+         row_needs = row_mask.any()
+         col_needs = col_mask.any()
+         mask_info = (row_mask, col_mask), (row_needs, col_needs)
+
+         if not (row_needs or col_needs):
+             # if not, then depromote, set fill_value to dummy
+             # (it won't be used but we don't want the cython code
+             # to crash when trying to cast it to dtype)
+             dtype, fill_value = arr.dtype, arr.dtype.type()
+
+     # at this point, it's guaranteed that dtype can hold both the arr values
+     # and the fill_value
+     out_shape = len(row_idx), len(col_idx)
+     out = np.empty(out_shape, dtype=dtype)
+
+     func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
+     if func is None and arr.dtype != out.dtype:
+         func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
+         if func is not None:
+             func = _convert_wrapper(func, out.dtype)
+
+     if func is not None:
+         func(arr, indexer, out=out, fill_value=fill_value)
+     else:
+         # test_reindex_multi
+         _take_2d_multi_object(
+             arr, indexer, out, fill_value=fill_value, mask_info=mask_info
+         )
+
+     return out
+
+
+ @functools.lru_cache
+ def _get_take_nd_function_cached(
+     ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt
+ ):
+     """
+     Part of _get_take_nd_function below that doesn't need `mask_info` and thus
+     can be cached (mask_info potentially contains a numpy ndarray which is not
+     hashable and thus cannot be used as argument for cached function).
+     """
+     tup = (arr_dtype.name, out_dtype.name)
+     if ndim == 1:
+         func = _take_1d_dict.get(tup, None)
+     elif ndim == 2:
+         if axis == 0:
+             func = _take_2d_axis0_dict.get(tup, None)
+         else:
+             func = _take_2d_axis1_dict.get(tup, None)
+     if func is not None:
+         return func
+
+     # We get here with string, uint, float16, and complex dtypes that could
+     # potentially be handled in algos_take_helper.
+     # Also a couple with (M8[ns], object) and (m8[ns], object)
+     tup = (out_dtype.name, out_dtype.name)
+     if ndim == 1:
+         func = _take_1d_dict.get(tup, None)
+     elif ndim == 2:
+         if axis == 0:
+             func = _take_2d_axis0_dict.get(tup, None)
+         else:
+             func = _take_2d_axis1_dict.get(tup, None)
+     if func is not None:
+         func = _convert_wrapper(func, out_dtype)
+         return func
+
+     return None
+
+
+ def _get_take_nd_function(
+     ndim: int,
+     arr_dtype: np.dtype,
+     out_dtype: np.dtype,
+     axis: AxisInt = 0,
+     mask_info=None,
+ ):
+     """
+     Get the appropriate "take" implementation for the given dimension, axis
+     and dtypes.
+     """
+     func = None
+     if ndim <= 2:
+         # for this part we don't need `mask_info` -> use the cached algo lookup
+         func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
+
+     if func is None:
+
+         def func(arr, indexer, out, fill_value=np.nan) -> None:
+             indexer = ensure_platform_int(indexer)
+             _take_nd_object(
+                 arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
+             )
+
+     return func
+
+
+ def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
+     def wrapper(
+         arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
+     ) -> None:
+         if arr_dtype is not None:
+             arr = arr.view(arr_dtype)
+         if out_dtype is not None:
+             out = out.view(out_dtype)
+         if fill_wrap is not None:
+             # FIXME: if we get here with dt64/td64 we need to be sure we have
+             #  matching resos
+             if fill_value.dtype.kind == "m":
+                 fill_value = fill_value.astype("m8[ns]")
+             else:
+                 fill_value = fill_value.astype("M8[ns]")
+             fill_value = fill_wrap(fill_value)
+
+         f(arr, indexer, out, fill_value=fill_value)
+
+     return wrapper
+
+
+ def _convert_wrapper(f, conv_dtype):
+     def wrapper(
+         arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
+     ) -> None:
+         if conv_dtype == object:
+             # GH#39755 avoid casting dt64/td64 to integers
+             arr = ensure_wrapped_if_datetimelike(arr)
+         arr = arr.astype(conv_dtype)
+         f(arr, indexer, out, fill_value=fill_value)
+
+     return wrapper
+
+
+ _take_1d_dict = {
+     ("int8", "int8"): libalgos.take_1d_int8_int8,
+     ("int8", "int32"): libalgos.take_1d_int8_int32,
+     ("int8", "int64"): libalgos.take_1d_int8_int64,
+     ("int8", "float64"): libalgos.take_1d_int8_float64,
+     ("int16", "int16"): libalgos.take_1d_int16_int16,
+     ("int16", "int32"): libalgos.take_1d_int16_int32,
+     ("int16", "int64"): libalgos.take_1d_int16_int64,
+     ("int16", "float64"): libalgos.take_1d_int16_float64,
+     ("int32", "int32"): libalgos.take_1d_int32_int32,
+     ("int32", "int64"): libalgos.take_1d_int32_int64,
+     ("int32", "float64"): libalgos.take_1d_int32_float64,
+     ("int64", "int64"): libalgos.take_1d_int64_int64,
+     ("int64", "float64"): libalgos.take_1d_int64_float64,
+     ("float32", "float32"): libalgos.take_1d_float32_float32,
+     ("float32", "float64"): libalgos.take_1d_float32_float64,
+     ("float64", "float64"): libalgos.take_1d_float64_float64,
+     ("object", "object"): libalgos.take_1d_object_object,
+     ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),
+     ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),
+     ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
+         libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
+     ),
+     ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
+         libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
+     ),
+ }
+
+ _take_2d_axis0_dict = {
+     ("int8", "int8"): libalgos.take_2d_axis0_int8_int8,
+     ("int8", "int32"): libalgos.take_2d_axis0_int8_int32,
+     ("int8", "int64"): libalgos.take_2d_axis0_int8_int64,
+     ("int8", "float64"): libalgos.take_2d_axis0_int8_float64,
+     ("int16", "int16"): libalgos.take_2d_axis0_int16_int16,
+     ("int16", "int32"): libalgos.take_2d_axis0_int16_int32,
+     ("int16", "int64"): libalgos.take_2d_axis0_int16_int64,
+     ("int16", "float64"): libalgos.take_2d_axis0_int16_float64,
+     ("int32", "int32"): libalgos.take_2d_axis0_int32_int32,
+     ("int32", "int64"): libalgos.take_2d_axis0_int32_int64,
+     ("int32", "float64"): libalgos.take_2d_axis0_int32_float64,
+     ("int64", "int64"): libalgos.take_2d_axis0_int64_int64,
+     ("int64", "float64"): libalgos.take_2d_axis0_int64_float64,
+     ("float32", "float32"): libalgos.take_2d_axis0_float32_float32,
+     ("float32", "float64"): libalgos.take_2d_axis0_float32_float64,
+     ("float64", "float64"): libalgos.take_2d_axis0_float64_float64,
+     ("object", "object"): libalgos.take_2d_axis0_object_object,
+     ("bool", "bool"): _view_wrapper(
+         libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8
+     ),
+     ("bool", "object"): _view_wrapper(
+         libalgos.take_2d_axis0_bool_object, np.uint8, None
+     ),
+     ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
+         libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+     ),
+     ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
+         libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+     ),
+ }
+
+ _take_2d_axis1_dict = {
+     ("int8", "int8"): libalgos.take_2d_axis1_int8_int8,
+     ("int8", "int32"): libalgos.take_2d_axis1_int8_int32,
+     ("int8", "int64"): libalgos.take_2d_axis1_int8_int64,
+     ("int8", "float64"): libalgos.take_2d_axis1_int8_float64,
+     ("int16", "int16"): libalgos.take_2d_axis1_int16_int16,
+     ("int16", "int32"): libalgos.take_2d_axis1_int16_int32,
+     ("int16", "int64"): libalgos.take_2d_axis1_int16_int64,
+     ("int16", "float64"): libalgos.take_2d_axis1_int16_float64,
+     ("int32", "int32"): libalgos.take_2d_axis1_int32_int32,
+     ("int32", "int64"): libalgos.take_2d_axis1_int32_int64,
+     ("int32", "float64"): libalgos.take_2d_axis1_int32_float64,
+     ("int64", "int64"): libalgos.take_2d_axis1_int64_int64,
+     ("int64", "float64"): libalgos.take_2d_axis1_int64_float64,
+     ("float32", "float32"): libalgos.take_2d_axis1_float32_float32,
+     ("float32", "float64"): libalgos.take_2d_axis1_float32_float64,
+     ("float64", "float64"): libalgos.take_2d_axis1_float64_float64,
+     ("object", "object"): libalgos.take_2d_axis1_object_object,
+     ("bool", "bool"): _view_wrapper(
+         libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8
+     ),
+     ("bool", "object"): _view_wrapper(
+         libalgos.take_2d_axis1_bool_object, np.uint8, None
+     ),
+     ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
+         libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+     ),
+     ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
+         libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+     ),
+ }
+
+ _take_2d_multi_dict = {
+     ("int8", "int8"): libalgos.take_2d_multi_int8_int8,
+     ("int8", "int32"): libalgos.take_2d_multi_int8_int32,
+     ("int8", "int64"): libalgos.take_2d_multi_int8_int64,
+     ("int8", "float64"): libalgos.take_2d_multi_int8_float64,
+     ("int16", "int16"): libalgos.take_2d_multi_int16_int16,
+     ("int16", "int32"): libalgos.take_2d_multi_int16_int32,
+     ("int16", "int64"): libalgos.take_2d_multi_int16_int64,
+     ("int16", "float64"): libalgos.take_2d_multi_int16_float64,
+     ("int32", "int32"): libalgos.take_2d_multi_int32_int32,
+     ("int32", "int64"): libalgos.take_2d_multi_int32_int64,
+     ("int32", "float64"): libalgos.take_2d_multi_int32_float64,
+     ("int64", "int64"): libalgos.take_2d_multi_int64_int64,
+     ("int64", "float64"): libalgos.take_2d_multi_int64_float64,
+     ("float32", "float32"): libalgos.take_2d_multi_float32_float32,
+     ("float32", "float64"): libalgos.take_2d_multi_float32_float64,
+     ("float64", "float64"): libalgos.take_2d_multi_float64_float64,
+     ("object", "object"): libalgos.take_2d_multi_object_object,
+     ("bool", "bool"): _view_wrapper(
+         libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
+     ),
+     ("bool", "object"): _view_wrapper(
+         libalgos.take_2d_multi_bool_object, np.uint8, None
+     ),
+     ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
+         libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+     ),
+     ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
+         libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+     ),
+ }
+
+
+ def _take_nd_object(
+     arr: np.ndarray,
+     indexer: npt.NDArray[np.intp],
+     out: np.ndarray,
+     axis: AxisInt,
+     fill_value,
+     mask_info,
+ ) -> None:
+     if mask_info is not None:
+         mask, needs_masking = mask_info
+     else:
+         mask = indexer == -1
+         needs_masking = mask.any()
+     if arr.dtype != out.dtype:
+         arr = arr.astype(out.dtype)
+     if arr.shape[axis] > 0:
+         arr.take(indexer, axis=axis, out=out)
+     if needs_masking:
+         outindexer = [slice(None)] * arr.ndim
+         outindexer[axis] = mask
+         out[tuple(outindexer)] = fill_value
+
+
+ def _take_2d_multi_object(
+     arr: np.ndarray,
+     indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+     out: np.ndarray,
+     fill_value,
+     mask_info,
+ ) -> None:
+     # this is not ideal, performance-wise, but it's better than raising
+     # an exception (best to optimize in Cython to avoid getting here)
+     row_idx, col_idx = indexer  # both np.intp
+     if mask_info is not None:
+         (row_mask, col_mask), (row_needs, col_needs) = mask_info
+     else:
+         row_mask = row_idx == -1
+         col_mask = col_idx == -1
+         row_needs = row_mask.any()
+         col_needs = col_mask.any()
+     if fill_value is not None:
+         if row_needs:
+             out[row_mask, :] = fill_value
+         if col_needs:
+             out[:, col_mask] = fill_value
+     for i, u_ in enumerate(row_idx):
+         if u_ != -1:
+             for j, v in enumerate(col_idx):
+                 if v != -1:
+                     out[i, j] = arr[u_, v]
+
+
+ def _take_preprocess_indexer_and_fill_value(
+     arr: np.ndarray,
+     indexer: npt.NDArray[np.intp],
+     fill_value,
+     allow_fill: bool,
+     mask: npt.NDArray[np.bool_] | None = None,
+ ):
+     mask_info: tuple[np.ndarray | None, bool] | None = None
+
+     if not allow_fill:
+         dtype, fill_value = arr.dtype, arr.dtype.type()
+         mask_info = None, False
+     else:
+         # check for promotion based on types only (do this first because
+         # it's faster than computing a mask)
+         dtype, fill_value = maybe_promote(arr.dtype, fill_value)
+         if dtype != arr.dtype:
+             # check if promotion is actually required based on indexer
+             if mask is not None:
+                 needs_masking = True
+             else:
+                 mask = indexer == -1
+                 needs_masking = bool(mask.any())
+             mask_info = mask, needs_masking
+             if not needs_masking:
+                 # if not, then depromote, set fill_value to dummy
+                 # (it won't be used but we don't want the cython code
+                 # to crash when trying to cast it to dtype)
+                 dtype, fill_value = arr.dtype, arr.dtype.type()
+
+     return dtype, fill_value, mask_info
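
For orientation, a minimal usage sketch of the `take_nd` entry point added above (not part of the commit; `take_nd` is a pandas-internal API, imported via the module path this file installs to):

import numpy as np
from pandas.core.array_algos.take import take_nd

arr = np.array([10, 20, 30], dtype=np.int64)

# With allow_fill=True (the default), indexer entries of -1 request the fill
# value; the default fill is NaN, so the int64 input is promoted to float64.
take_nd(arr, np.array([2, -1, 0], dtype=np.intp))
# -> array([30., nan, 10.])

# With allow_fill=False no mask is computed and the dtype is preserved; per
# the docstring, the indexer must then contain no -1 entries.
take_nd(arr, np.array([2, 1, 0], dtype=np.intp), allow_fill=False)
# -> array([30, 20, 10])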
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc ADDED
Binary file (6.12 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc ADDED
Binary file (262 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc ADDED
Binary file (399 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.36 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc ADDED
Binary file (11.9 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc ADDED
Binary file (17 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc ADDED
Binary file (6.08 kB)

env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc ADDED
Binary file (19.7 kB)
 
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/check.py ADDED
@@ -0,0 +1,8 @@
+ from __future__ import annotations
+
+ from pandas.compat._optional import import_optional_dependency
+
+ ne = import_optional_dependency("numexpr", errors="warn")
+ NUMEXPR_INSTALLED = ne is not None
+
+ __all__ = ["NUMEXPR_INSTALLED"]
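
A sketch of how the flag defined above is typically consumed; the engine selection shown is illustrative, not taken from this commit:

import pandas as pd
from pandas.core.computation.check import NUMEXPR_INSTALLED

# Fall back to the pure-Python engine when numexpr is absent.
engine = "numexpr" if NUMEXPR_INSTALLED else "python"
s = pd.Series(range(5))
result = pd.eval("s * 2 + 1", engine=engine)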
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/common.py ADDED
@@ -0,0 +1,48 @@
+ from __future__ import annotations
+
+ from functools import reduce
+
+ import numpy as np
+
+ from pandas._config import get_option
+
+
+ def ensure_decoded(s) -> str:
+     """
+     If we have bytes, decode them to unicode.
+     """
+     if isinstance(s, (np.bytes_, bytes)):
+         s = s.decode(get_option("display.encoding"))
+     return s
+
+
+ def result_type_many(*arrays_and_dtypes):
+     """
+     Wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32)
+     argument limit.
+     """
+     try:
+         return np.result_type(*arrays_and_dtypes)
+     except ValueError:
+         # we have > NPY_MAXARGS terms in our expression
+         return reduce(np.result_type, arrays_and_dtypes)
+     except TypeError:
+         from pandas.core.dtypes.cast import find_common_type
+         from pandas.core.dtypes.common import is_extension_array_dtype
+
+         arr_and_dtypes = list(arrays_and_dtypes)
+         ea_dtypes, non_ea_dtypes = [], []
+         for arr_or_dtype in arr_and_dtypes:
+             if is_extension_array_dtype(arr_or_dtype):
+                 ea_dtypes.append(arr_or_dtype)
+             else:
+                 non_ea_dtypes.append(arr_or_dtype)
+
+         if non_ea_dtypes:
+             try:
+                 np_dtype = np.result_type(*non_ea_dtypes)
+             except ValueError:
+                 np_dtype = reduce(np.result_type, arrays_and_dtypes)
+             return find_common_type(ea_dtypes + [np_dtype])
+
+         return find_common_type(ea_dtypes)
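
A quick sketch of why `result_type_many` exists: per its docstring, `np.result_type` is capped at NPY_MAXARGS (32) arguments and raises ValueError beyond that, while the wrapper falls back to a pairwise reduce:

import numpy as np
from pandas.core.computation.common import result_type_many

dtypes = [np.dtype("int8")] * 40 + [np.dtype("float32")]
result_type_many(*dtypes)  # dtype('float32'), despite exceeding 32 arguments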
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/ops.py ADDED
@@ -0,0 +1,621 @@
+ """
+ Operator classes for eval.
+ """
+
+ from __future__ import annotations
+
+ from datetime import datetime
+ from functools import partial
+ import operator
+ from typing import (
+     TYPE_CHECKING,
+     Callable,
+     Literal,
+ )
+
+ import numpy as np
+
+ from pandas._libs.tslibs import Timestamp
+
+ from pandas.core.dtypes.common import (
+     is_list_like,
+     is_scalar,
+ )
+
+ import pandas.core.common as com
+ from pandas.core.computation.common import (
+     ensure_decoded,
+     result_type_many,
+ )
+ from pandas.core.computation.scope import DEFAULT_GLOBALS
+
+ from pandas.io.formats.printing import (
+     pprint_thing,
+     pprint_thing_encoded,
+ )
+
+ if TYPE_CHECKING:
+     from collections.abc import (
+         Iterable,
+         Iterator,
+     )
+
+ REDUCTIONS = ("sum", "prod", "min", "max")
+
+ _unary_math_ops = (
+     "sin",
+     "cos",
+     "exp",
+     "log",
+     "expm1",
+     "log1p",
+     "sqrt",
+     "sinh",
+     "cosh",
+     "tanh",
+     "arcsin",
+     "arccos",
+     "arctan",
+     "arccosh",
+     "arcsinh",
+     "arctanh",
+     "abs",
+     "log10",
+     "floor",
+     "ceil",
+ )
+ _binary_math_ops = ("arctan2",)
+
+ MATHOPS = _unary_math_ops + _binary_math_ops
+
+
+ LOCAL_TAG = "__pd_eval_local_"
+
+
+ class Term:
+     def __new__(cls, name, env, side=None, encoding=None):
+         klass = Constant if not isinstance(name, str) else cls
+         # error: Argument 2 for "super" not an instance of argument 1
+         supr_new = super(Term, klass).__new__  # type: ignore[misc]
+         return supr_new(klass)
+
+     is_local: bool
+
+     def __init__(self, name, env, side=None, encoding=None) -> None:
+         # name is a str for Term, but may be something else for subclasses
+         self._name = name
+         self.env = env
+         self.side = side
+         tname = str(name)
+         self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS
+         self._value = self._resolve_name()
+         self.encoding = encoding
+
+     @property
+     def local_name(self) -> str:
+         return self.name.replace(LOCAL_TAG, "")
+
+     def __repr__(self) -> str:
+         return pprint_thing(self.name)
+
+     def __call__(self, *args, **kwargs):
+         return self.value
+
+     def evaluate(self, *args, **kwargs) -> Term:
+         return self
+
+     def _resolve_name(self):
+         local_name = str(self.local_name)
+         is_local = self.is_local
+         if local_name in self.env.scope and isinstance(
+             self.env.scope[local_name], type
+         ):
+             is_local = False
+
+         res = self.env.resolve(local_name, is_local=is_local)
+         self.update(res)
+
+         if hasattr(res, "ndim") and res.ndim > 2:
+             raise NotImplementedError(
+                 "N-dimensional objects, where N > 2, are not supported with eval"
+             )
+         return res
+
+     def update(self, value) -> None:
+         """
+         search order for local (i.e., @variable) variables:
+
+         scope, key_variable
+         [('locals', 'local_name'),
+          ('globals', 'local_name'),
+          ('locals', 'key'),
+          ('globals', 'key')]
+         """
+         key = self.name
+
+         # if it's a variable name (otherwise a constant)
+         if isinstance(key, str):
+             self.env.swapkey(self.local_name, key, new_value=value)
+
+         self.value = value
+
+     @property
+     def is_scalar(self) -> bool:
+         return is_scalar(self._value)
+
+     @property
+     def type(self):
+         try:
+             # potentially very slow for large, mixed dtype frames
+             return self._value.values.dtype
+         except AttributeError:
+             try:
+                 # ndarray
+                 return self._value.dtype
+             except AttributeError:
+                 # scalar
+                 return type(self._value)
+
+     return_type = type
+
+     @property
+     def raw(self) -> str:
+         return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})"
+
+     @property
+     def is_datetime(self) -> bool:
+         try:
+             t = self.type.type
+         except AttributeError:
+             t = self.type
+
+         return issubclass(t, (datetime, np.datetime64))
+
+     @property
+     def value(self):
+         return self._value
+
+     @value.setter
+     def value(self, new_value) -> None:
+         self._value = new_value
+
+     @property
+     def name(self):
+         return self._name
+
+     @property
+     def ndim(self) -> int:
+         return self._value.ndim
+
+
+ class Constant(Term):
+     def _resolve_name(self):
+         return self._name
+
+     @property
+     def name(self):
+         return self.value
+
+     def __repr__(self) -> str:
+         # in python 2 str() of float
+         # can truncate shorter than repr()
+         return repr(self.name)
+
+
+ _bool_op_map = {"not": "~", "and": "&", "or": "|"}
+
+
+ class Op:
+     """
+     Hold an operator of arbitrary arity.
+     """
+
+     op: str
+
+     def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None:
+         self.op = _bool_op_map.get(op, op)
+         self.operands = operands
+         self.encoding = encoding
+
+     def __iter__(self) -> Iterator:
+         return iter(self.operands)
+
+     def __repr__(self) -> str:
+         """
+         Print a generic n-ary operator and its operands using infix notation.
+         """
+         # recurse over the operands
+         parened = (f"({pprint_thing(opr)})" for opr in self.operands)
+         return pprint_thing(f" {self.op} ".join(parened))
+
+     @property
+     def return_type(self):
+         # clobber types to bool if the op is a boolean operator
+         if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS):
+             return np.bool_
+         return result_type_many(*(term.type for term in com.flatten(self)))
+
+     @property
+     def has_invalid_return_type(self) -> bool:
+         types = self.operand_types
+         obj_dtype_set = frozenset([np.dtype("object")])
+         return self.return_type == object and types - obj_dtype_set
+
+     @property
+     def operand_types(self):
+         return frozenset(term.type for term in com.flatten(self))
+
+     @property
+     def is_scalar(self) -> bool:
+         return all(operand.is_scalar for operand in self.operands)
+
+     @property
+     def is_datetime(self) -> bool:
+         try:
+             t = self.return_type.type
+         except AttributeError:
+             t = self.return_type
+
+         return issubclass(t, (datetime, np.datetime64))
+
+
+ def _in(x, y):
+     """
+     Compute the vectorized membership of ``x in y`` if possible, otherwise
+     use Python.
+     """
+     try:
+         return x.isin(y)
+     except AttributeError:
+         if is_list_like(x):
+             try:
+                 return y.isin(x)
+             except AttributeError:
+                 pass
+         return x in y
+
+
+ def _not_in(x, y):
+     """
+     Compute the vectorized membership of ``x not in y`` if possible,
+     otherwise use Python.
+     """
+     try:
+         return ~x.isin(y)
+     except AttributeError:
+         if is_list_like(x):
+             try:
+                 return ~y.isin(x)
+             except AttributeError:
+                 pass
+         return x not in y
+
+
+ CMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in")
+ _cmp_ops_funcs = (
+     operator.gt,
+     operator.lt,
+     operator.ge,
+     operator.le,
+     operator.eq,
+     operator.ne,
+     _in,
+     _not_in,
+ )
+ _cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs))
+
+ BOOL_OPS_SYMS = ("&", "|", "and", "or")
+ _bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_)
+ _bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs))
+
+ ARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%")
+ _arith_ops_funcs = (
+     operator.add,
+     operator.sub,
+     operator.mul,
+     operator.truediv,
+     operator.pow,
+     operator.floordiv,
+     operator.mod,
+ )
+ _arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs))
+
+ SPECIAL_CASE_ARITH_OPS_SYMS = ("**", "//", "%")
+ _special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod)
+ _special_case_arith_ops_dict = dict(
+     zip(SPECIAL_CASE_ARITH_OPS_SYMS, _special_case_arith_ops_funcs)
+ )
+
+ _binary_ops_dict = {}
+
+ for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
+     _binary_ops_dict.update(d)
+
+
+ def _cast_inplace(terms, acceptable_dtypes, dtype) -> None:
+     """
+     Cast an expression inplace.
+
+     Parameters
+     ----------
+     terms : Op
+         The expression that should cast.
+     acceptable_dtypes : list of acceptable numpy.dtype
+         Will not cast if term's dtype in this list.
+     dtype : str or numpy.dtype
+         The dtype to cast to.
+     """
+     dt = np.dtype(dtype)
+     for term in terms:
+         if term.type in acceptable_dtypes:
+             continue
+
+         try:
+             new_value = term.value.astype(dt)
+         except AttributeError:
+             new_value = dt.type(term.value)
+         term.update(new_value)
+
+
+ def is_term(obj) -> bool:
+     return isinstance(obj, Term)
+
+
+ class BinOp(Op):
+     """
+     Hold a binary operator and its operands.
+
+     Parameters
+     ----------
+     op : str
+     lhs : Term or Op
+     rhs : Term or Op
+     """
+
+     def __init__(self, op: str, lhs, rhs) -> None:
+         super().__init__(op, (lhs, rhs))
+         self.lhs = lhs
+         self.rhs = rhs
+
+         self._disallow_scalar_only_bool_ops()
+
+         self.convert_values()
+
+         try:
+             self.func = _binary_ops_dict[op]
+         except KeyError as err:
+             # has to be made a list for python3
+             keys = list(_binary_ops_dict.keys())
+             raise ValueError(
+                 f"Invalid binary operator {repr(op)}, valid operators are {keys}"
+             ) from err
+
+     def __call__(self, env):
+         """
+         Recursively evaluate an expression in Python space.
+
+         Parameters
+         ----------
+         env : Scope
+
+         Returns
+         -------
+         object
+             The result of an evaluated expression.
+         """
+         # recurse over the left/right nodes
+         left = self.lhs(env)
+         right = self.rhs(env)
+
+         return self.func(left, right)
+
+     def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
+         """
+         Evaluate a binary operation *before* being passed to the engine.
+
+         Parameters
+         ----------
+         env : Scope
+         engine : str
+         parser : str
+         term_type : type
+         eval_in_python : list
+
+         Returns
+         -------
+         term_type
+             The "pre-evaluated" expression as an instance of ``term_type``
+         """
+         if engine == "python":
+             res = self(env)
+         else:
+             # recurse over the left/right nodes
+
+             left = self.lhs.evaluate(
+                 env,
+                 engine=engine,
+                 parser=parser,
+                 term_type=term_type,
+                 eval_in_python=eval_in_python,
+             )
+
+             right = self.rhs.evaluate(
+                 env,
+                 engine=engine,
+                 parser=parser,
+                 term_type=term_type,
+                 eval_in_python=eval_in_python,
+             )
+
+             # base cases
+             if self.op in eval_in_python:
+                 res = self.func(left.value, right.value)
+             else:
+                 from pandas.core.computation.eval import eval
+
+                 res = eval(self, local_dict=env, engine=engine, parser=parser)
+
+         name = env.add_tmp(res)
+         return term_type(name, env=env)
+
+     def convert_values(self) -> None:
+         """
+         Convert datetimes to a comparable value in an expression.
+         """
+
+         def stringify(value):
+             encoder: Callable
+             if self.encoding is not None:
+                 encoder = partial(pprint_thing_encoded, encoding=self.encoding)
+             else:
+                 encoder = pprint_thing
+             return encoder(value)
+
+         lhs, rhs = self.lhs, self.rhs
+
+         if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
+             v = rhs.value
+             if isinstance(v, (int, float)):
+                 v = stringify(v)
+             v = Timestamp(ensure_decoded(v))
+             if v.tz is not None:
+                 v = v.tz_convert("UTC")
+             self.rhs.update(v)
+
+         if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
+             v = lhs.value
+             if isinstance(v, (int, float)):
+                 v = stringify(v)
+             v = Timestamp(ensure_decoded(v))
+             if v.tz is not None:
+                 v = v.tz_convert("UTC")
+             self.lhs.update(v)
+
+     def _disallow_scalar_only_bool_ops(self):
+         rhs = self.rhs
+         lhs = self.lhs
+
+         # GH#24883 unwrap dtype if necessary to ensure we have a type object
+         rhs_rt = rhs.return_type
+         rhs_rt = getattr(rhs_rt, "type", rhs_rt)
+         lhs_rt = lhs.return_type
+         lhs_rt = getattr(lhs_rt, "type", lhs_rt)
+         if (
+             (lhs.is_scalar or rhs.is_scalar)
+             and self.op in _bool_ops_dict
+             and (
+                 not (
+                     issubclass(rhs_rt, (bool, np.bool_))
+                     and issubclass(lhs_rt, (bool, np.bool_))
+                 )
+             )
+         ):
+             raise NotImplementedError("cannot evaluate scalar only bool ops")
+
+
+ def isnumeric(dtype) -> bool:
+     return issubclass(np.dtype(dtype).type, np.number)
+
+
+ class Div(BinOp):
+     """
+     Div operator to special case casting.
+
+     Parameters
+     ----------
+     lhs, rhs : Term or Op
+         The Terms or Ops in the ``/`` expression.
+     """
+
+     def __init__(self, lhs, rhs) -> None:
+         super().__init__("/", lhs, rhs)
+
+         if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
+             raise TypeError(
+                 f"unsupported operand type(s) for {self.op}: "
+                 f"'{lhs.return_type}' and '{rhs.return_type}'"
+             )
+
+         # do not upcast float32s to float64 unnecessarily
+         acceptable_dtypes = [np.float32, np.float64]
+         _cast_inplace(com.flatten(self), acceptable_dtypes, np.float64)
+
+
+ UNARY_OPS_SYMS = ("+", "-", "~", "not")
+ _unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert)
+ _unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs))
+
+
+ class UnaryOp(Op):
+     """
+     Hold a unary operator and its operands.
+
+     Parameters
+     ----------
+     op : str
+         The token used to represent the operator.
+     operand : Term or Op
+         The Term or Op operand to the operator.
+
+     Raises
+     ------
+     ValueError
+         * If no function associated with the passed operator token is found.
+     """
+
+     def __init__(self, op: Literal["+", "-", "~", "not"], operand) -> None:
+         super().__init__(op, (operand,))
+         self.operand = operand
+
+         try:
+             self.func = _unary_ops_dict[op]
+         except KeyError as err:
+             raise ValueError(
+                 f"Invalid unary operator {repr(op)}, "
+                 f"valid operators are {UNARY_OPS_SYMS}"
+             ) from err
+
+     def __call__(self, env) -> MathCall:
+         operand = self.operand(env)
+         # error: Cannot call function of unknown type
+         return self.func(operand)  # type: ignore[operator]
+
+     def __repr__(self) -> str:
+         return pprint_thing(f"{self.op}({self.operand})")
+
+     @property
+     def return_type(self) -> np.dtype:
+         operand = self.operand
+         if operand.return_type == np.dtype("bool"):
+             return np.dtype("bool")
+         if isinstance(operand, Op) and (
+             operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict
+         ):
+             return np.dtype("bool")
+         return np.dtype("int")
+
+
+ class MathCall(Op):
+     def __init__(self, func, args) -> None:
+         super().__init__(func.name, args)
+         self.func = func
+
+     def __call__(self, env):
+         # error: "Op" not callable
+         operands = [op(env) for op in self.operands]  # type: ignore[operator]
+         return self.func.func(*operands)
+
+     def __repr__(self) -> str:
+         operands = map(str, self.operands)
+         return pprint_thing(f"{self.op}({','.join(operands)})")
+
+
+ class FuncNode:
+     def __init__(self, name: str) -> None:
+         if name not in MATHOPS:
+             raise ValueError(f'"{name}" is not a supported function')
+         self.name = name
+         self.func = getattr(np, name)
+
+     def __call__(self, *args) -> MathCall:
+         return MathCall(self, args)
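
These Term/Op node classes are the machinery behind pd.eval and DataFrame.query; a short sketch of the user-facing behavior they implement (example data is hypothetical):

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [3, 3, 1]})

# 'in' dispatches to the vectorized _in above (Series.isin under the hood).
pd.eval("df.a in [1, 3]")        # -> Series([True, False, True])

# 'and' is mapped to '&' via _bool_op_map before lookup in _binary_ops_dict.
df.query("a > 1 and b == 3")     # -> the row with a=2, b=3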
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/parsing.py ADDED
@@ -0,0 +1,198 @@
1
+ """
2
+ :func:`~pandas.eval` source string parsing functions
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from io import StringIO
7
+ from keyword import iskeyword
8
+ import token
9
+ import tokenize
10
+ from typing import TYPE_CHECKING
11
+
12
+ if TYPE_CHECKING:
13
+ from collections.abc import (
14
+ Hashable,
15
+ Iterator,
16
+ )
17
+
18
+ # A token value Python's tokenizer probably will never use.
19
+ BACKTICK_QUOTED_STRING = 100
20
+
21
+
22
+ def create_valid_python_identifier(name: str) -> str:
23
+ """
24
+ Create valid Python identifiers from any string.
25
+
26
+ Check if name contains any special characters. If it contains any
27
+ special characters, the special characters will be replaced by
28
+ a special string and a prefix is added.
29
+
30
+ Raises
31
+ ------
32
+ SyntaxError
33
+ If the returned name is not a Python valid identifier, raise an exception.
34
+ This can happen if there is a hashtag in the name, as the tokenizer will
35
+ than terminate and not find the backtick.
36
+ But also for characters that fall out of the range of (U+0001..U+007F).
37
+ """
38
+ if name.isidentifier() and not iskeyword(name):
39
+ return name
40
+
41
+ # Create a dict with the special characters and their replacement string.
42
+ # EXACT_TOKEN_TYPES contains these special characters
43
+ # token.tok_name contains a readable description of the replacement string.
44
+ special_characters_replacements = {
45
+ char: f"_{token.tok_name[tokval]}_"
46
+ for char, tokval in (tokenize.EXACT_TOKEN_TYPES.items())
47
+ }
48
+    special_characters_replacements.update(
+        {
+            " ": "_",
+            "?": "_QUESTIONMARK_",
+            "!": "_EXCLAMATIONMARK_",
+            "$": "_DOLLARSIGN_",
+            "€": "_EUROSIGN_",
+            "°": "_DEGREESIGN_",
+            # Including quotes works, but there are exceptions.
+            "'": "_SINGLEQUOTE_",
+            '"': "_DOUBLEQUOTE_",
+            # Currently not possible. Terminates parser and won't find backtick.
+            # "#": "_HASH_",
+        }
+    )
+
+    name = "".join([special_characters_replacements.get(char, char) for char in name])
+    name = f"BACKTICK_QUOTED_STRING_{name}"
+
+    if not name.isidentifier():
+        raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.")
+
+    return name
+
+
+def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]:
+    """
+    Clean up a column name if surrounded by backticks.
+
+    Backtick quoted strings are indicated by a certain tokval value. If a string
+    is a backtick quoted token it will be processed by
+    :func:`_create_valid_python_identifier` so that the parser can find this
+    string when the query is executed.
+    In this case the tok will get the NAME tokval.
+
+    Parameters
+    ----------
+    tok : tuple of int, str
+        ints correspond to the all caps constants in the tokenize module
+
+    Returns
+    -------
+    tok : Tuple[int, str]
+        Either the input token or the replacement values
+    """
+    toknum, tokval = tok
+    if toknum == BACKTICK_QUOTED_STRING:
+        return tokenize.NAME, create_valid_python_identifier(tokval)
+    return toknum, tokval
+
+
+def clean_column_name(name: Hashable) -> Hashable:
+    """
+    Function to emulate the cleaning of a backtick quoted name.
+
+    The purpose of this function is to see what happens to the name of an
+    identifier if it goes through the process of being parsed as Python code
+    inside a backtick quoted string and then being cleaned
+    (removed of any special characters).
+
+    Parameters
+    ----------
+    name : hashable
+        Name to be cleaned.
+
+    Returns
+    -------
+    name : hashable
+        Returns the name after tokenizing and cleaning.
+
+    Notes
+    -----
+    For some cases, a name cannot be converted to a valid Python identifier.
+    In that case :func:`tokenize_string` raises a SyntaxError.
+    In that case, we just return the name unmodified.
+
+    If this name was used in the query string (this makes the query call impossible)
+    an error will be raised by :func:`tokenize_backtick_quoted_string` instead,
+    which is not caught and propagates to the user level.
+    """
+    try:
+        tokenized = tokenize_string(f"`{name}`")
+        tokval = next(tokenized)[1]
+        return create_valid_python_identifier(tokval)
+    except SyntaxError:
+        return name
+
+
+def tokenize_backtick_quoted_string(
+    token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int
+) -> tuple[int, str]:
+    """
+    Creates a token from a backtick quoted string.
+
+    Moves the token_generator forwards till right after the next backtick.
+
+    Parameters
+    ----------
+    token_generator : Iterator[tokenize.TokenInfo]
+        The generator that yields the tokens of the source string (Tuple[int, str]).
+        The generator is at the first token after the backtick (`)
+
+    source : str
+        The Python source code string.
+
+    string_start : int
+        The start of the backtick quoted string inside the source string.
+
+    Returns
+    -------
+    tok : Tuple[int, str]
+        The token that represents the backtick quoted string.
+        The integer is equal to BACKTICK_QUOTED_STRING (100).
+    """
+    for _, tokval, start, _, _ in token_generator:
+        if tokval == "`":
+            string_end = start[1]
+            break
+
+    return BACKTICK_QUOTED_STRING, source[string_start:string_end]
+
+
+def tokenize_string(source: str) -> Iterator[tuple[int, str]]:
+    """
+    Tokenize a Python source code string.
+
+    Parameters
+    ----------
+    source : str
+        The Python source code string.
+
+    Returns
+    -------
+    tok_generator : Iterator[Tuple[int, str]]
+        An iterator yielding all tokens with only toknum and tokval (Tuple[int, str]).
+    """
+    line_reader = StringIO(source).readline
+    token_generator = tokenize.generate_tokens(line_reader)
+
+    # Loop over all tokens till a backtick (`) is found.
+    # Then, take all tokens till the next backtick to form a backtick quoted string
+    for toknum, tokval, start, _, _ in token_generator:
+        if tokval == "`":
+            try:
+                yield tokenize_backtick_quoted_string(
+                    token_generator, source, string_start=start[1] + 1
+                )
+            except Exception as err:
+                raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err
+        else:
+            yield toknum, tokval
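These tokenizer helpers are what let `DataFrame.query` and `DataFrame.eval` accept backtick-quoted column names that are not valid Python identifiers. A minimal usage sketch (column names invented for illustration):

import pandas as pd

df = pd.DataFrame({"total sales": [10, 25], "region!": ["EU", "US"]})
# Each backtick-quoted name is rewritten by create_valid_python_identifier,
# e.g. "total sales" -> "BACKTICK_QUOTED_STRING_total_sales", before evaluation.
print(df.query("`total sales` > 15 and `region!` == 'US'"))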
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/scope.py ADDED
@@ -0,0 +1,355 @@
+"""
+Module for scope operations
+"""
+from __future__ import annotations
+
+from collections import ChainMap
+import datetime
+import inspect
+from io import StringIO
+import itertools
+import pprint
+import struct
+import sys
+from typing import TypeVar
+
+import numpy as np
+
+from pandas._libs.tslibs import Timestamp
+from pandas.errors import UndefinedVariableError
+
+_KT = TypeVar("_KT")
+_VT = TypeVar("_VT")
+
+
+# https://docs.python.org/3/library/collections.html#chainmap-examples-and-recipes
+class DeepChainMap(ChainMap[_KT, _VT]):
+    """
+    Variant of ChainMap that allows direct updates to inner scopes.
+
+    Only works when all passed mappings are mutable.
+    """
+
+    def __setitem__(self, key: _KT, value: _VT) -> None:
+        for mapping in self.maps:
+            if key in mapping:
+                mapping[key] = value
+                return
+        self.maps[0][key] = value
+
+    def __delitem__(self, key: _KT) -> None:
+        """
+        Raises
+        ------
+        KeyError
+            If `key` doesn't exist.
+        """
+        for mapping in self.maps:
+            if key in mapping:
+                del mapping[key]
+                return
+        raise KeyError(key)
+
+
+def ensure_scope(
+    level: int, global_dict=None, local_dict=None, resolvers=(), target=None
+) -> Scope:
+    """Ensure that we are grabbing the correct scope."""
+    return Scope(
+        level + 1,
+        global_dict=global_dict,
+        local_dict=local_dict,
+        resolvers=resolvers,
+        target=target,
+    )
+
+
+def _replacer(x) -> str:
+    """
+    Replace a number with its hexadecimal representation. Used to tag
+    temporary variables with their calling scope's id.
+    """
+    # get the hex repr of the binary char and remove 0x and pad by pad_size
+    # zeros
+    try:
+        hexin = ord(x)
+    except TypeError:
+        # bytes literals masquerade as ints when iterating in py3
+        hexin = x
+
+    return hex(hexin)
+
+
+def _raw_hex_id(obj) -> str:
+    """Return the padded hexadecimal id of ``obj``."""
+    # interpret as a pointer since that's really what id returns
+    packed = struct.pack("@P", id(obj))
+    return "".join([_replacer(x) for x in packed])
+
+
+DEFAULT_GLOBALS = {
+    "Timestamp": Timestamp,
+    "datetime": datetime.datetime,
+    "True": True,
+    "False": False,
+    "list": list,
+    "tuple": tuple,
+    "inf": np.inf,
+    "Inf": np.inf,
+}
+
+
+def _get_pretty_string(obj) -> str:
+    """
+    Return a prettier version of obj.
+
+    Parameters
+    ----------
+    obj : object
+        Object to pretty print
+
+    Returns
+    -------
+    str
+        Pretty print object repr
+    """
+    sio = StringIO()
+    pprint.pprint(obj, stream=sio)
+    return sio.getvalue()
+
+
+class Scope:
+    """
+    Object to hold scope, with a few bells to deal with some custom syntax
+    and contexts added by pandas.
+
+    Parameters
+    ----------
+    level : int
+    global_dict : dict or None, optional, default None
+    local_dict : dict or Scope or None, optional, default None
+    resolvers : list-like or None, optional, default None
+    target : object
+
+    Attributes
+    ----------
+    level : int
+    scope : DeepChainMap
+    target : object
+    temps : dict
+    """
+
+    __slots__ = ["level", "scope", "target", "resolvers", "temps"]
+    level: int
+    scope: DeepChainMap
+    resolvers: DeepChainMap
+    temps: dict
+
+    def __init__(
+        self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None
+    ) -> None:
+        self.level = level + 1
+
+        # shallow copy because we don't want to keep filling this up with what
+        # was there before if there are multiple calls to Scope/_ensure_scope
+        self.scope = DeepChainMap(DEFAULT_GLOBALS.copy())
+        self.target = target
+
+        if isinstance(local_dict, Scope):
+            self.scope.update(local_dict.scope)
+            if local_dict.target is not None:
+                self.target = local_dict.target
+            self._update(local_dict.level)
+
+        frame = sys._getframe(self.level)
+
+        try:
+            # shallow copy here because we don't want to replace what's in
+            # scope when we align terms (alignment accesses the underlying
+            # numpy array of pandas objects)
+            scope_global = self.scope.new_child(
+                (global_dict if global_dict is not None else frame.f_globals).copy()
+            )
+            self.scope = DeepChainMap(scope_global)
+            if not isinstance(local_dict, Scope):
+                scope_local = self.scope.new_child(
+                    (local_dict if local_dict is not None else frame.f_locals).copy()
+                )
+                self.scope = DeepChainMap(scope_local)
+        finally:
+            del frame
+
+        # assumes that resolvers are going from outermost scope to inner
+        if isinstance(local_dict, Scope):
+            resolvers += tuple(local_dict.resolvers.maps)
+        self.resolvers = DeepChainMap(*resolvers)
+        self.temps = {}
+
+    def __repr__(self) -> str:
+        scope_keys = _get_pretty_string(list(self.scope.keys()))
+        res_keys = _get_pretty_string(list(self.resolvers.keys()))
+        return f"{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})"
+
+    @property
+    def has_resolvers(self) -> bool:
+        """
+        Return whether we have any extra scope.
+
+        For example, DataFrames pass their columns as resolvers during calls to
+        ``DataFrame.eval()`` and ``DataFrame.query()``.
+
+        Returns
+        -------
+        hr : bool
+        """
+        return bool(len(self.resolvers))
+
+    def resolve(self, key: str, is_local: bool):
+        """
+        Resolve a variable name in a possibly local context.
+
+        Parameters
+        ----------
+        key : str
+            A variable name
+        is_local : bool
+            Flag indicating whether the variable is local or not (prefixed with
+            the '@' symbol)
+
+        Returns
+        -------
+        value : object
+            The value of a particular variable
+        """
+        try:
+            # only look for locals in outer scope
+            if is_local:
+                return self.scope[key]
+
+            # not a local variable so check in resolvers if we have them
+            if self.has_resolvers:
+                return self.resolvers[key]
+
+            # if we're here that means that we have no locals and we also have
+            # no resolvers
+            assert not is_local and not self.has_resolvers
+            return self.scope[key]
+        except KeyError:
+            try:
+                # last ditch effort we look in temporaries
+                # these are created when parsing indexing expressions
+                # e.g., df[df > 0]
+                return self.temps[key]
+            except KeyError as err:
+                raise UndefinedVariableError(key, is_local) from err
+
+    def swapkey(self, old_key: str, new_key: str, new_value=None) -> None:
+        """
+        Replace a variable name, with a potentially new value.
+
+        Parameters
+        ----------
+        old_key : str
+            Current variable name to replace
+        new_key : str
+            New variable name to replace `old_key` with
+        new_value : object
+            Value to be replaced along with the possible renaming
+        """
+        if self.has_resolvers:
+            maps = self.resolvers.maps + self.scope.maps
+        else:
+            maps = self.scope.maps
+
+        maps.append(self.temps)
+
+        for mapping in maps:
+            if old_key in mapping:
+                mapping[new_key] = new_value
+                return
+
+    def _get_vars(self, stack, scopes: list[str]) -> None:
+        """
+        Get specifically scoped variables from a list of stack frames.
+
+        Parameters
+        ----------
+        stack : list
+            A list of stack frames as returned by ``inspect.stack()``
+        scopes : sequence of strings
+            A sequence containing valid stack frame attribute names that
+            evaluate to a dictionary. For example, ('locals', 'globals')
+        """
+        variables = itertools.product(scopes, stack)
+        for scope, (frame, _, _, _, _, _) in variables:
+            try:
+                d = getattr(frame, f"f_{scope}")
+                self.scope = DeepChainMap(self.scope.new_child(d))
+            finally:
+                # won't remove it, but DECREF it
+                # in Py3 this probably isn't necessary since frame won't be
+                # scope after the loop
+                del frame
+
+    def _update(self, level: int) -> None:
+        """
+        Update the current scope by going back `level` levels.
+
+        Parameters
+        ----------
+        level : int
+        """
+        sl = level + 1
+
+        # add sl frames to the scope starting with the
+        # most distant and overwriting with more current
+        # makes sure that we can capture variable scope
+        stack = inspect.stack()
+
+        try:
+            self._get_vars(stack[:sl], scopes=["locals"])
+        finally:
+            del stack[:], stack
+
+    def add_tmp(self, value) -> str:
+        """
+        Add a temporary variable to the scope.
+
+        Parameters
+        ----------
+        value : object
+            An arbitrary object to be assigned to a temporary variable.
+
+        Returns
+        -------
+        str
+            The name of the temporary variable created.
+        """
+        name = f"{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}"
+
+        # add to inner most scope
+        assert name not in self.temps
+        self.temps[name] = value
+        assert name in self.temps
+
+        # only increment if the variable gets put in the scope
+        return name
+
+    @property
+    def ntemps(self) -> int:
+        """The number of temporary variables in this scope"""
+        return len(self.temps)
+
+    @property
+    def full_scope(self) -> DeepChainMap:
+        """
+        Return the full scope for use with passing to engines transparently
+        as a mapping.
+
+        Returns
+        -------
+        vars : DeepChainMap
+            All variables in this scope.
+        """
+        maps = [self.temps] + self.resolvers.maps + self.scope.maps
+        return DeepChainMap(*maps)
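A quick sketch of what makes `DeepChainMap` "deep": unlike `collections.ChainMap`, writes and deletes target whichever mapping already holds the key, not just the first map. The import path below is pandas-internal and may change between versions:

from pandas.core.computation.scope import DeepChainMap

inner, outer = {"x": 1}, {"y": 2}
scope = DeepChainMap(inner, outer)
scope["y"] = 20  # "y" lives in `outer`, so the update lands there
scope["z"] = 3   # unknown keys go into the first (innermost) map
del scope["x"]   # removes "x" from `inner`; raises KeyError if absent
print(inner, outer)  # {'z': 3} {'y': 20}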
env-llmeval/lib/python3.10/site-packages/pandas/core/strings/base.py ADDED
@@ -0,0 +1,262 @@
+from __future__ import annotations
+
+import abc
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    Literal,
+)
+
+import numpy as np
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+    import re
+
+    from pandas._typing import Scalar
+
+    from pandas import Series
+
+
+class BaseStringArrayMethods(abc.ABC):
+    """
+    Base class for extension arrays implementing string methods.
+
+    This is where our ExtensionArrays can override the implementation of
+    Series.str.<method>. We don't expect this to work with
+    3rd-party extension arrays.
+
+    * User calls Series.str.<method>
+    * pandas extracts the extension array from the Series
+    * pandas calls ``extension_array._str_<method>(*args, **kwargs)``
+    * pandas wraps the result, to return to the user.
+
+    See :ref:`Series.str` for the docstring of each method.
+    """
+
+    def _str_getitem(self, key):
+        if isinstance(key, slice):
+            return self._str_slice(start=key.start, stop=key.stop, step=key.step)
+        else:
+            return self._str_get(key)
+
+    @abc.abstractmethod
+    def _str_count(self, pat, flags: int = 0):
+        pass
+
+    @abc.abstractmethod
+    def _str_pad(
+        self,
+        width: int,
+        side: Literal["left", "right", "both"] = "left",
+        fillchar: str = " ",
+    ):
+        pass
+
+    @abc.abstractmethod
+    def _str_contains(
+        self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True
+    ):
+        pass
+
+    @abc.abstractmethod
+    def _str_startswith(self, pat, na=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_endswith(self, pat, na=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_replace(
+        self,
+        pat: str | re.Pattern,
+        repl: str | Callable,
+        n: int = -1,
+        case: bool = True,
+        flags: int = 0,
+        regex: bool = True,
+    ):
+        pass
+
+    @abc.abstractmethod
+    def _str_repeat(self, repeats: int | Sequence[int]):
+        pass
+
+    @abc.abstractmethod
+    def _str_match(
+        self, pat: str, case: bool = True, flags: int = 0, na: Scalar = np.nan
+    ):
+        pass
+
+    @abc.abstractmethod
+    def _str_fullmatch(
+        self,
+        pat: str | re.Pattern,
+        case: bool = True,
+        flags: int = 0,
+        na: Scalar = np.nan,
+    ):
+        pass
+
+    @abc.abstractmethod
+    def _str_encode(self, encoding, errors: str = "strict"):
+        pass
+
+    @abc.abstractmethod
+    def _str_find(self, sub, start: int = 0, end=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_rfind(self, sub, start: int = 0, end=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_findall(self, pat, flags: int = 0):
+        pass
+
+    @abc.abstractmethod
+    def _str_get(self, i):
+        pass
+
+    @abc.abstractmethod
+    def _str_index(self, sub, start: int = 0, end=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_rindex(self, sub, start: int = 0, end=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_join(self, sep: str):
+        pass
+
+    @abc.abstractmethod
+    def _str_partition(self, sep: str, expand):
+        pass
+
+    @abc.abstractmethod
+    def _str_rpartition(self, sep: str, expand):
+        pass
+
+    @abc.abstractmethod
+    def _str_len(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_slice(self, start=None, stop=None, step=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_slice_replace(self, start=None, stop=None, repl=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_translate(self, table):
+        pass
+
+    @abc.abstractmethod
+    def _str_wrap(self, width: int, **kwargs):
+        pass
+
+    @abc.abstractmethod
+    def _str_get_dummies(self, sep: str = "|"):
+        pass
+
+    @abc.abstractmethod
+    def _str_isalnum(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_isalpha(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_isdecimal(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_isdigit(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_islower(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_isnumeric(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_isspace(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_istitle(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_isupper(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_capitalize(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_casefold(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_title(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_swapcase(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_lower(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_upper(self):
+        pass
+
+    @abc.abstractmethod
+    def _str_normalize(self, form):
+        pass
+
+    @abc.abstractmethod
+    def _str_strip(self, to_strip=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_lstrip(self, to_strip=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_rstrip(self, to_strip=None):
+        pass
+
+    @abc.abstractmethod
+    def _str_removeprefix(self, prefix: str) -> Series:
+        pass
+
+    @abc.abstractmethod
+    def _str_removesuffix(self, suffix: str) -> Series:
+        pass
+
+    @abc.abstractmethod
+    def _str_split(
+        self, pat=None, n=-1, expand: bool = False, regex: bool | None = None
+    ):
+        pass
+
+    @abc.abstractmethod
+    def _str_rsplit(self, pat=None, n=-1):
+        pass
+
+    @abc.abstractmethod
+    def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):
+        pass
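The abstract `_str_<method>` hooks above are the dispatch targets of the public `Series.str` accessor, following the four-step flow listed in the class docstring. A rough sketch of that round trip; the direct `_str_upper()` call uses private API and is shown only to make the dispatch visible (it may change between versions):

import pandas as pd

s = pd.Series(["apple", None, "Banana"])
print(s.str.upper().tolist())  # ['APPLE', nan, 'BANANA'] via the accessor
print(s.array._str_upper())    # the hook the accessor dispatches to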
env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/datetimes.cpython-310.pyc ADDED
Binary file (35 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/numeric.cpython-310.pyc ADDED
Binary file (8.14 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/timedeltas.cpython-310.pyc ADDED
Binary file (7.33 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/tools/__pycache__/times.cpython-310.pyc ADDED
Binary file (3.95 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/tools/datetimes.py ADDED
@@ -0,0 +1,1235 @@
+from __future__ import annotations
+
+from collections import abc
+from datetime import date
+from functools import partial
+from itertools import islice
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    TypedDict,
+    Union,
+    cast,
+    overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+    lib,
+    tslib,
+)
+from pandas._libs.tslibs import (
+    OutOfBoundsDatetime,
+    Timedelta,
+    Timestamp,
+    astype_overflowsafe,
+    is_supported_dtype,
+    timezones as libtimezones,
+)
+from pandas._libs.tslibs.conversion import cast_from_unit_vectorized
+from pandas._libs.tslibs.parsing import (
+    DateParseError,
+    guess_datetime_format,
+)
+from pandas._libs.tslibs.strptime import array_strptime
+from pandas._typing import (
+    AnyArrayLike,
+    ArrayLike,
+    DateTimeErrorChoices,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import (
+    ensure_object,
+    is_float,
+    is_integer,
+    is_integer_dtype,
+    is_list_like,
+    is_numeric_dtype,
+)
+from pandas.core.dtypes.dtypes import (
+    ArrowDtype,
+    DatetimeTZDtype,
+)
+from pandas.core.dtypes.generic import (
+    ABCDataFrame,
+    ABCSeries,
+)
+
+from pandas.arrays import (
+    DatetimeArray,
+    IntegerArray,
+    NumpyExtensionArray,
+)
+from pandas.core.algorithms import unique
+from pandas.core.arrays import ArrowExtensionArray
+from pandas.core.arrays.base import ExtensionArray
+from pandas.core.arrays.datetimes import (
+    maybe_convert_dtype,
+    objects_to_datetime64,
+    tz_to_dtype,
+)
+from pandas.core.construction import extract_array
+from pandas.core.indexes.base import Index
+from pandas.core.indexes.datetimes import DatetimeIndex
+
+if TYPE_CHECKING:
+    from collections.abc import Hashable
+
+    from pandas._libs.tslibs.nattype import NaTType
+    from pandas._libs.tslibs.timedeltas import UnitChoices
+
+    from pandas import (
+        DataFrame,
+        Series,
+    )
+
+# ---------------------------------------------------------------------
+# types used in annotations
+
+ArrayConvertible = Union[list, tuple, AnyArrayLike]
+Scalar = Union[float, str]
+DatetimeScalar = Union[Scalar, date, np.datetime64]
+
+DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
+
+DatetimeDictArg = Union[list[Scalar], tuple[Scalar, ...], AnyArrayLike]
+
+
+class YearMonthDayDict(TypedDict, total=True):
+    year: DatetimeDictArg
+    month: DatetimeDictArg
+    day: DatetimeDictArg
+
+
+class FulldatetimeDict(YearMonthDayDict, total=False):
+    hour: DatetimeDictArg
+    hours: DatetimeDictArg
+    minute: DatetimeDictArg
+    minutes: DatetimeDictArg
+    second: DatetimeDictArg
+    seconds: DatetimeDictArg
+    ms: DatetimeDictArg
+    us: DatetimeDictArg
+    ns: DatetimeDictArg
+
+
+DictConvertible = Union[FulldatetimeDict, "DataFrame"]
+start_caching_at = 50
+
+
+# ---------------------------------------------------------------------
+
+
+def _guess_datetime_format_for_array(arr, dayfirst: bool | None = False) -> str | None:
+    # Try to guess the format based on the first non-NaN element, return None if can't
+    if (first_non_null := tslib.first_non_null(arr)) != -1:
+        if type(first_non_nan_element := arr[first_non_null]) is str:  # noqa: E721
+            # GH#32264 np.str_ object
+            guessed_format = guess_datetime_format(
+                first_non_nan_element, dayfirst=dayfirst
+            )
+            if guessed_format is not None:
+                return guessed_format
+            # If there are multiple non-null elements, warn about
+            # how parsing might not be consistent
+            if tslib.first_non_null(arr[first_non_null + 1 :]) != -1:
+                warnings.warn(
+                    "Could not infer format, so each element will be parsed "
+                    "individually, falling back to `dateutil`. To ensure parsing is "
+                    "consistent and as-expected, please specify a format.",
+                    UserWarning,
+                    stacklevel=find_stack_level(),
+                )
+    return None
+
+
+def should_cache(
+    arg: ArrayConvertible, unique_share: float = 0.7, check_count: int | None = None
+) -> bool:
+    """
+    Decides whether to do caching.
+
+    If the percent of unique elements among `check_count` elements is less
+    than `unique_share * 100` then we can do caching.
+
+    Parameters
+    ----------
+    arg: listlike, tuple, 1-d array, Series
+    unique_share: float, default=0.7, optional
+        0 < unique_share < 1
+    check_count: int, optional
+        0 <= check_count <= len(arg)
+
+    Returns
+    -------
+    do_caching: bool
+
+    Notes
+    -----
+    By default for a sequence of less than 50 items in size, we don't do
+    caching; for the number of elements less than 5000, we take ten percent of
+    all elements to check for a uniqueness share; if the sequence size is more
+    than 5000, then we check only the first 500 elements.
+    All constants were chosen empirically.
+    """
+    do_caching = True
+
+    # default realization
+    if check_count is None:
+        # in this case, the gain from caching is negligible
+        if len(arg) <= start_caching_at:
+            return False
+
+        if len(arg) <= 5000:
+            check_count = len(arg) // 10
+        else:
+            check_count = 500
+    else:
+        assert (
+            0 <= check_count <= len(arg)
+        ), "check_count must be within the bounds [0; len(arg)]"
+        if check_count == 0:
+            return False
+
+    assert 0 < unique_share < 1, "unique_share must be within the bounds (0; 1)"
+
+    try:
+        # We can't cache if the items are not hashable.
+        unique_elements = set(islice(arg, check_count))
+    except TypeError:
+        return False
+    if len(unique_elements) > check_count * unique_share:
+        do_caching = False
+    return do_caching
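# Editor's aside (illustrative only, not part of the diff): with the defaults
# above, caching kicks in for more than 50 elements whose sampled uniqueness
# share is low. The import path is internal and may change between versions.
# from pandas.core.tools.datetimes import should_cache
# should_cache(["2020-01-01"] * 40)  # False: at or below the 50-element floor
# should_cache(["2020-01-01"] * 60)  # True: 1 unique value among 6 sampled
# should_cache(list(range(60)))      # False: all 6 sampled values are distinct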
+
+
+def _maybe_cache(
+    arg: ArrayConvertible,
+    format: str | None,
+    cache: bool,
+    convert_listlike: Callable,
+) -> Series:
+    """
+    Create a cache of unique dates from an array of dates
+
+    Parameters
+    ----------
+    arg : listlike, tuple, 1-d array, Series
+    format : string
+        Strftime format to parse time
+    cache : bool
+        True attempts to create a cache of converted values
+    convert_listlike : function
+        Conversion function to apply on dates
+
+    Returns
+    -------
+    cache_array : Series
+        Cache of converted, unique dates. Can be empty
+    """
+    from pandas import Series
+
+    cache_array = Series(dtype=object)
+
+    if cache:
+        # Perform a quicker unique check
+        if not should_cache(arg):
+            return cache_array
+
+        if not isinstance(arg, (np.ndarray, ExtensionArray, Index, ABCSeries)):
+            arg = np.array(arg)
+
+        unique_dates = unique(arg)
+        if len(unique_dates) < len(arg):
+            cache_dates = convert_listlike(unique_dates, format)
+            # GH#45319
+            try:
+                cache_array = Series(cache_dates, index=unique_dates, copy=False)
+            except OutOfBoundsDatetime:
+                return cache_array
+            # GH#39882 and GH#35888 in case of None and NaT we get duplicates
+            if not cache_array.index.is_unique:
+                cache_array = cache_array[~cache_array.index.duplicated()]
+    return cache_array
+
+
+def _box_as_indexlike(
+    dt_array: ArrayLike, utc: bool = False, name: Hashable | None = None
+) -> Index:
+    """
+    Properly boxes the ndarray of datetimes to DatetimeIndex
+    if it is possible or to generic Index instead
+
+    Parameters
+    ----------
+    dt_array: 1-d array
+        Array of datetimes to be wrapped in an Index.
+    utc : bool
+        Whether to convert/localize timestamps to UTC.
+    name : string, default None
+        Name for a resulting index
+
+    Returns
+    -------
+    result : datetime of converted dates
+        - DatetimeIndex if convertible to sole datetime64 type
+        - general Index otherwise
+    """
+
+    if lib.is_np_dtype(dt_array.dtype, "M"):
+        tz = "utc" if utc else None
+        return DatetimeIndex(dt_array, tz=tz, name=name)
+    return Index(dt_array, name=name, dtype=dt_array.dtype)
+
+
+def _convert_and_box_cache(
+    arg: DatetimeScalarOrArrayConvertible,
+    cache_array: Series,
+    name: Hashable | None = None,
+) -> Index:
+    """
+    Convert array of dates with a cache and wrap the result in an Index.
+
+    Parameters
+    ----------
+    arg : integer, float, string, datetime, list, tuple, 1-d array, Series
+    cache_array : Series
+        Cache of converted, unique dates
+    name : string, default None
+        Name for a DatetimeIndex
+
+    Returns
+    -------
+    result : Index-like of converted dates
+    """
+    from pandas import Series
+
+    result = Series(arg, dtype=cache_array.index.dtype).map(cache_array)
+    return _box_as_indexlike(result._values, utc=False, name=name)
+
+
+def _convert_listlike_datetimes(
+    arg,
+    format: str | None,
+    name: Hashable | None = None,
+    utc: bool = False,
+    unit: str | None = None,
+    errors: DateTimeErrorChoices = "raise",
+    dayfirst: bool | None = None,
+    yearfirst: bool | None = None,
+    exact: bool = True,
+):
+    """
+    Helper function for to_datetime. Performs the conversions of 1D listlike
+    of dates
+
+    Parameters
+    ----------
+    arg : list, tuple, ndarray, Series, Index
+        date to be parsed
+    name : object
+        None or string for the Index name
+    utc : bool
+        Whether to convert/localize timestamps to UTC.
+    unit : str
+        None or string of the frequency of the passed data
+    errors : str
+        error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
+    dayfirst : bool
+        dayfirst parsing behavior from to_datetime
+    yearfirst : bool
+        yearfirst parsing behavior from to_datetime
+    exact : bool, default True
+        exact format matching behavior from to_datetime
+
+    Returns
+    -------
+    Index-like of parsed dates
+    """
+    if isinstance(arg, (list, tuple)):
+        arg = np.array(arg, dtype="O")
+    elif isinstance(arg, NumpyExtensionArray):
+        arg = np.array(arg)
+
+    arg_dtype = getattr(arg, "dtype", None)
+    # these are shortcutable
+    tz = "utc" if utc else None
+    if isinstance(arg_dtype, DatetimeTZDtype):
+        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
+            return DatetimeIndex(arg, tz=tz, name=name)
+        if utc:
+            arg = arg.tz_convert(None).tz_localize("utc")
+        return arg
+
+    elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.type is Timestamp:
+        # TODO: Combine with above if DTI/DTA supports Arrow timestamps
+        if utc:
+            # pyarrow uses UTC, not lowercase utc
+            if isinstance(arg, Index):
+                arg_array = cast(ArrowExtensionArray, arg.array)
+                if arg_dtype.pyarrow_dtype.tz is not None:
+                    arg_array = arg_array._dt_tz_convert("UTC")
+                else:
+                    arg_array = arg_array._dt_tz_localize("UTC")
+                arg = Index(arg_array)
+            else:
+                # ArrowExtensionArray
+                if arg_dtype.pyarrow_dtype.tz is not None:
+                    arg = arg._dt_tz_convert("UTC")
+                else:
+                    arg = arg._dt_tz_localize("UTC")
+        return arg
+
+    elif lib.is_np_dtype(arg_dtype, "M"):
+        if not is_supported_dtype(arg_dtype):
+            # We go to closest supported reso, i.e. "s"
+            arg = astype_overflowsafe(
+                # TODO: looks like we incorrectly raise with errors=="ignore"
+                np.asarray(arg),
+                np.dtype("M8[s]"),
+                is_coerce=errors == "coerce",
+            )
+
+        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
+            return DatetimeIndex(arg, tz=tz, name=name)
+        elif utc:
+            # DatetimeArray, DatetimeIndex
+            return arg.tz_localize("utc")
+
+        return arg
+
+    elif unit is not None:
+        if format is not None:
+            raise ValueError("cannot specify both format and unit")
+        return _to_datetime_with_unit(arg, unit, name, utc, errors)
+    elif getattr(arg, "ndim", 1) > 1:
+        raise TypeError(
+            "arg must be a string, datetime, list, tuple, 1-d array, or Series"
+        )
+
+    # warn if passing timedelta64, raise for PeriodDtype
+    # NB: this must come after unit transformation
+    try:
+        arg, _ = maybe_convert_dtype(arg, copy=False, tz=libtimezones.maybe_get_tz(tz))
+    except TypeError:
+        if errors == "coerce":
+            npvalues = np.array(["NaT"], dtype="datetime64[ns]").repeat(len(arg))
+            return DatetimeIndex(npvalues, name=name)
+        elif errors == "ignore":
+            idx = Index(arg, name=name)
+            return idx
+        raise
+
+    arg = ensure_object(arg)
+
+    if format is None:
+        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
+
+    # `format` could be inferred, or user didn't ask for mixed-format parsing.
+    if format is not None and format != "mixed":
+        return _array_strptime_with_fallback(arg, name, utc, format, exact, errors)
+
+    result, tz_parsed = objects_to_datetime64(
+        arg,
+        dayfirst=dayfirst,
+        yearfirst=yearfirst,
+        utc=utc,
+        errors=errors,
+        allow_object=True,
+    )
+
+    if tz_parsed is not None:
+        # We can take a shortcut since the datetime64 numpy array
+        # is in UTC
+        out_unit = np.datetime_data(result.dtype)[0]
+        dtype = cast(DatetimeTZDtype, tz_to_dtype(tz_parsed, out_unit))
+        dt64_values = result.view(f"M8[{dtype.unit}]")
+        dta = DatetimeArray._simple_new(dt64_values, dtype=dtype)
+        return DatetimeIndex._simple_new(dta, name=name)
+
+    return _box_as_indexlike(result, utc=utc, name=name)
+
+
+def _array_strptime_with_fallback(
+    arg,
+    name,
+    utc: bool,
+    fmt: str,
+    exact: bool,
+    errors: str,
+) -> Index:
+    """
+    Call array_strptime, with fallback behavior depending on 'errors'.
+    """
+    result, tz_out = array_strptime(arg, fmt, exact=exact, errors=errors, utc=utc)
+    if tz_out is not None:
+        unit = np.datetime_data(result.dtype)[0]
+        dtype = DatetimeTZDtype(tz=tz_out, unit=unit)
+        dta = DatetimeArray._simple_new(result, dtype=dtype)
+        if utc:
+            dta = dta.tz_convert("UTC")
+        return Index(dta, name=name)
+    elif result.dtype != object and utc:
+        unit = np.datetime_data(result.dtype)[0]
+        res = Index(result, dtype=f"M8[{unit}, UTC]", name=name)
+        return res
+    return Index(result, dtype=result.dtype, name=name)
+
+
+def _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index:
+    """
+    to_datetime specialized to the case where a 'unit' is passed.
+    """
+    arg = extract_array(arg, extract_numpy=True)
+
+    # GH#30050 pass an ndarray to tslib.array_with_unit_to_datetime
+    # because it expects an ndarray argument
+    if isinstance(arg, IntegerArray):
+        arr = arg.astype(f"datetime64[{unit}]")
+        tz_parsed = None
+    else:
+        arg = np.asarray(arg)
+
+        if arg.dtype.kind in "iu":
+            # Note we can't do "f" here because that could induce unwanted
+            # rounding GH#14156, GH#20445
+            arr = arg.astype(f"datetime64[{unit}]", copy=False)
+            try:
+                arr = astype_overflowsafe(arr, np.dtype("M8[ns]"), copy=False)
+            except OutOfBoundsDatetime:
+                if errors == "raise":
+                    raise
+                arg = arg.astype(object)
+                return _to_datetime_with_unit(arg, unit, name, utc, errors)
+            tz_parsed = None
+
+        elif arg.dtype.kind == "f":
+            with np.errstate(over="raise"):
+                try:
+                    arr = cast_from_unit_vectorized(arg, unit=unit)
+                except OutOfBoundsDatetime:
+                    if errors != "raise":
+                        return _to_datetime_with_unit(
+                            arg.astype(object), unit, name, utc, errors
+                        )
+                    raise OutOfBoundsDatetime(
+                        f"cannot convert input with unit '{unit}'"
+                    )
+
+            arr = arr.view("M8[ns]")
+            tz_parsed = None
+        else:
+            arg = arg.astype(object, copy=False)
+            arr, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
+
+    if errors == "ignore":
+        # Index constructor _may_ infer to DatetimeIndex
+        result = Index._with_infer(arr, name=name)
+    else:
+        result = DatetimeIndex(arr, name=name)
+
+    if not isinstance(result, DatetimeIndex):
+        return result
+
+    # GH#23758: We may still need to localize the result with tz
+    # GH#25546: Apply tz_parsed first (from arg), then tz (from caller)
+    # result will be naive but in UTC
+    result = result.tz_localize("UTC").tz_convert(tz_parsed)
+
+    if utc:
+        if result.tz is None:
+            result = result.tz_localize("utc")
+        else:
+            result = result.tz_convert("utc")
+    return result
+
+
+def _adjust_to_origin(arg, origin, unit):
+    """
+    Helper function for to_datetime.
+    Adjust input argument to the specified origin
+
+    Parameters
+    ----------
+    arg : list, tuple, ndarray, Series, Index
+        date to be adjusted
+    origin : 'julian' or Timestamp
+        origin offset for the arg
+    unit : str
+        passed unit from to_datetime, must be 'D'
+
+    Returns
+    -------
+    ndarray or scalar of adjusted date(s)
+    """
+    if origin == "julian":
+        original = arg
+        j0 = Timestamp(0).to_julian_date()
+        if unit != "D":
+            raise ValueError("unit must be 'D' for origin='julian'")
+        try:
+            arg = arg - j0
+        except TypeError as err:
+            raise ValueError(
+                "incompatible 'arg' type for given 'origin'='julian'"
+            ) from err
+
+        # preemptively check this for a nice range
+        j_max = Timestamp.max.to_julian_date() - j0
+        j_min = Timestamp.min.to_julian_date() - j0
+        if np.any(arg > j_max) or np.any(arg < j_min):
+            raise OutOfBoundsDatetime(
+                f"{original} is Out of Bounds for origin='julian'"
+            )
+    else:
+        # arg must be numeric
+        if not (
+            (is_integer(arg) or is_float(arg)) or is_numeric_dtype(np.asarray(arg))
+        ):
+            raise ValueError(
+                f"'{arg}' is not compatible with origin='{origin}'; "
+                "it must be numeric with a unit specified"
+            )
+
+        # we are going to offset back to unix / epoch time
+        try:
+            offset = Timestamp(origin, unit=unit)
+        except OutOfBoundsDatetime as err:
+            raise OutOfBoundsDatetime(f"origin {origin} is Out of Bounds") from err
+        except ValueError as err:
+            raise ValueError(
+                f"origin {origin} cannot be converted to a Timestamp"
+            ) from err
+
+        if offset.tz is not None:
+            raise ValueError(f"origin offset {offset} must be tz-naive")
+        td_offset = offset - Timestamp(0)
+
+        # convert the offset to the unit of the arg
+        # this should be lossless in terms of precision
+        ioffset = td_offset // Timedelta(1, unit=unit)
+
+        # scalars & ndarray-like can handle the addition
+        if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)):
+            arg = np.asarray(arg)
+        arg = arg + ioffset
+    return arg
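# Editor's aside (illustrative only, not part of the diff): the epoch shift in
# _adjust_to_origin is plain integer arithmetic. With a Timestamp origin of
# "1960-01-01" and unit="D":
#     offset  = Timestamp("1960-01-01") - Timestamp(0)  # Timedelta(-3653 days)
#     ioffset = offset // Timedelta(1, unit="D")        # -3653
# so arg=[1, 2, 3] becomes [-3652, -3651, -3650] days relative to the unix
# epoch, which to_datetime then renders as 1960-01-02 through 1960-01-04.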
620
+
621
+
622
+ @overload
623
+ def to_datetime(
624
+ arg: DatetimeScalar,
625
+ errors: DateTimeErrorChoices = ...,
626
+ dayfirst: bool = ...,
627
+ yearfirst: bool = ...,
628
+ utc: bool = ...,
629
+ format: str | None = ...,
630
+ exact: bool = ...,
631
+ unit: str | None = ...,
632
+ infer_datetime_format: bool = ...,
633
+ origin=...,
634
+ cache: bool = ...,
635
+ ) -> Timestamp:
636
+ ...
637
+
638
+
639
+ @overload
640
+ def to_datetime(
641
+ arg: Series | DictConvertible,
642
+ errors: DateTimeErrorChoices = ...,
643
+ dayfirst: bool = ...,
644
+ yearfirst: bool = ...,
645
+ utc: bool = ...,
646
+ format: str | None = ...,
647
+ exact: bool = ...,
648
+ unit: str | None = ...,
649
+ infer_datetime_format: bool = ...,
650
+ origin=...,
651
+ cache: bool = ...,
652
+ ) -> Series:
653
+ ...
654
+
655
+
656
+ @overload
657
+ def to_datetime(
658
+ arg: list | tuple | Index | ArrayLike,
659
+ errors: DateTimeErrorChoices = ...,
660
+ dayfirst: bool = ...,
661
+ yearfirst: bool = ...,
662
+ utc: bool = ...,
663
+ format: str | None = ...,
664
+ exact: bool = ...,
665
+ unit: str | None = ...,
666
+ infer_datetime_format: bool = ...,
667
+ origin=...,
668
+ cache: bool = ...,
669
+ ) -> DatetimeIndex:
670
+ ...
671
+
672
+
673
+ def to_datetime(
674
+ arg: DatetimeScalarOrArrayConvertible | DictConvertible,
675
+ errors: DateTimeErrorChoices = "raise",
676
+ dayfirst: bool = False,
677
+ yearfirst: bool = False,
678
+ utc: bool = False,
679
+ format: str | None = None,
680
+ exact: bool | lib.NoDefault = lib.no_default,
681
+ unit: str | None = None,
682
+ infer_datetime_format: lib.NoDefault | bool = lib.no_default,
683
+ origin: str = "unix",
684
+ cache: bool = True,
685
+ ) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None:
686
+ """
687
+ Convert argument to datetime.
688
+
689
+ This function converts a scalar, array-like, :class:`Series` or
690
+ :class:`DataFrame`/dict-like to a pandas datetime object.
691
+
692
+ Parameters
693
+ ----------
694
+ arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
695
+ The object to convert to a datetime. If a :class:`DataFrame` is provided, the
696
+ method expects minimally the following columns: :const:`"year"`,
697
+ :const:`"month"`, :const:`"day"`. The column "year"
698
+ must be specified in 4-digit format.
699
+ errors : {'ignore', 'raise', 'coerce'}, default 'raise'
700
+ - If :const:`'raise'`, then invalid parsing will raise an exception.
701
+ - If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
702
+ - If :const:`'ignore'`, then invalid parsing will return the input.
703
+ dayfirst : bool, default False
704
+ Specify a date parse order if `arg` is str or is list-like.
705
+ If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
706
+ is parsed as :const:`2012-11-10`.
707
+
708
+ .. warning::
709
+
710
+ ``dayfirst=True`` is not strict, but will prefer to parse
711
+ with day first.
712
+
713
+ yearfirst : bool, default False
714
+ Specify a date parse order if `arg` is str or is list-like.
715
+
716
+ - If :const:`True` parses dates with the year first, e.g.
717
+ :const:`"10/11/12"` is parsed as :const:`2010-11-12`.
718
+ - If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is
719
+ preceded (same as :mod:`dateutil`).
720
+
721
+ .. warning::
722
+
723
+ ``yearfirst=True`` is not strict, but will prefer to parse
724
+ with year first.
725
+
726
+ utc : bool, default False
727
+ Control timezone-related parsing, localization and conversion.
728
+
729
+ - If :const:`True`, the function *always* returns a timezone-aware
730
+ UTC-localized :class:`Timestamp`, :class:`Series` or
731
+ :class:`DatetimeIndex`. To do this, timezone-naive inputs are
732
+ *localized* as UTC, while timezone-aware inputs are *converted* to UTC.
733
+
734
+ - If :const:`False` (default), inputs will not be coerced to UTC.
735
+ Timezone-naive inputs will remain naive, while timezone-aware ones
736
+ will keep their time offsets. Limitations exist for mixed
737
+ offsets (typically, daylight savings), see :ref:`Examples
738
+ <to_datetime_tz_examples>` section for details.
739
+
740
+ .. warning::
741
+
742
+ In a future version of pandas, parsing datetimes with mixed time
743
+ zones will raise an error unless `utc=True`.
744
+ Please specify `utc=True` to opt in to the new behaviour
745
+ and silence this warning. To create a `Series` with mixed offsets and
746
+ `object` dtype, please use `apply` and `datetime.datetime.strptime`.
747
+
748
+ See also: pandas general documentation about `timezone conversion and
749
+ localization
750
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
751
+ #time-zone-handling>`_.
752
+
753
+ format : str, default None
754
+ The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
755
+ `strftime documentation
756
+ <https://docs.python.org/3/library/datetime.html
757
+ #strftime-and-strptime-behavior>`_ for more information on choices, though
758
+ note that :const:`"%f"` will parse all the way up to nanoseconds.
759
+ You can also pass:
760
+
761
+ - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
762
+ time string (not necessarily in exactly the same format);
763
+ - "mixed", to infer the format for each element individually. This is risky,
764
+ and you should probably use it along with `dayfirst`.
765
+
766
+ .. note::
767
+
768
+ If a :class:`DataFrame` is passed, then `format` has no effect.
769
+
770
+ exact : bool, default True
771
+ Control how `format` is used:
772
+
773
+ - If :const:`True`, require an exact `format` match.
774
+ - If :const:`False`, allow the `format` to match anywhere in the target
775
+ string.
776
+
777
+ Cannot be used alongside ``format='ISO8601'`` or ``format='mixed'``.
778
+ unit : str, default 'ns'
779
+ The unit of the arg (D,s,ms,us,ns) denote the unit, which is an
780
+ integer or float number. This will be based off the origin.
781
+ Example, with ``unit='ms'`` and ``origin='unix'``, this would calculate
782
+ the number of milliseconds to the unix epoch start.
783
+ infer_datetime_format : bool, default False
784
+ If :const:`True` and no `format` is given, attempt to infer the format
785
+ of the datetime strings based on the first non-NaN element,
786
+ and if it can be inferred, switch to a faster method of parsing them.
787
+ In some cases this can increase the parsing speed by ~5-10x.
788
+
789
+ .. deprecated:: 2.0.0
790
+ A strict version of this argument is now the default, passing it has
791
+ no effect.
792
+
793
+ origin : scalar, default 'unix'
794
+ Define the reference date. The numeric values would be parsed as number
795
+ of units (defined by `unit`) since this reference date.
796
+
797
+ - If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.
798
+ - If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
799
+ beginning of Julian Calendar. Julian day number :const:`0` is assigned
800
+ to the day starting at noon on January 1, 4713 BC.
801
+ - If Timestamp convertible (Timestamp, dt.datetime, np.datetimt64 or date
802
+ string), origin is set to Timestamp identified by origin.
803
+ - If a float or integer, origin is the difference
804
+ (in units determined by the ``unit`` argument) relative to 1970-01-01.
805
+ cache : bool, default True
806
+ If :const:`True`, use a cache of unique, converted dates to apply the
807
+ datetime conversion. May produce significant speed-up when parsing
808
+ duplicate date strings, especially ones with timezone offsets. The cache
809
+ is only used when there are at least 50 values. The presence of
810
+ out-of-bounds values will render the cache unusable and may slow down
811
+ parsing.
812
+
813
+ Returns
814
+ -------
815
+ datetime
816
+ If parsing succeeded.
817
+ Return type depends on input (types in parenthesis correspond to
818
+ fallback in case of unsuccessful timezone or out-of-range timestamp
819
+ parsing):
820
+
821
+ - scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
822
+ - array-like: :class:`DatetimeIndex` (or :class:`Series` with
823
+ :class:`object` dtype containing :class:`datetime.datetime`)
824
+ - Series: :class:`Series` of :class:`datetime64` dtype (or
825
+ :class:`Series` of :class:`object` dtype containing
826
+ :class:`datetime.datetime`)
827
+ - DataFrame: :class:`Series` of :class:`datetime64` dtype (or
828
+ :class:`Series` of :class:`object` dtype containing
829
+ :class:`datetime.datetime`)
830
+
831
+ Raises
832
+ ------
833
+ ParserError
834
+ When parsing a date from string fails.
835
+ ValueError
836
+ When another datetime conversion error happens. For example when one
837
+ of 'year', 'month', day' columns is missing in a :class:`DataFrame`, or
838
+ when a Timezone-aware :class:`datetime.datetime` is found in an array-like
839
+ of mixed time offsets, and ``utc=False``.
840
+
841
+ See Also
842
+ --------
843
+ DataFrame.astype : Cast argument to a specified dtype.
844
+ to_timedelta : Convert argument to timedelta.
845
+ convert_dtypes : Convert dtypes.
846
+
847
+ Notes
848
+ -----
849
+
850
+ Many input types are supported, and lead to different output types:
851
+
852
+ - **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
853
+ module or :mod:`numpy`). They are converted to :class:`Timestamp` when
854
+ possible, otherwise they are converted to :class:`datetime.datetime`.
855
+ None/NaN/null scalars are converted to :const:`NaT`.
856
+
857
+ - **array-like** can contain int, float, str, datetime objects. They are
858
+ converted to :class:`DatetimeIndex` when possible, otherwise they are
859
+ converted to :class:`Index` with :class:`object` dtype, containing
860
+ :class:`datetime.datetime`. None/NaN/null entries are converted to
861
+ :const:`NaT` in both cases.
862
+
863
+ - **Series** are converted to :class:`Series` with :class:`datetime64`
864
+ dtype when possible, otherwise they are converted to :class:`Series` with
865
+ :class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
866
+ entries are converted to :const:`NaT` in both cases.
867
+
868
+ - **DataFrame/dict-like** are converted to :class:`Series` with
869
+ :class:`datetime64` dtype. For each row a datetime is created from assembling
870
+ the various dataframe columns. Column keys can be common abbreviations
871
+ like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or
872
+ plurals of the same.
873
+
874
+ The following causes are responsible for :class:`datetime.datetime` objects
875
+ being returned (possibly inside an :class:`Index` or a :class:`Series` with
876
+ :class:`object` dtype) instead of a proper pandas designated type
877
+ (:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
878
+ with :class:`datetime64` dtype):
879
+
880
+ - when any input element is before :const:`Timestamp.min` or after
881
+ :const:`Timestamp.max`, see `timestamp limitations
882
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
883
+ #timeseries-timestamp-limits>`_.
884
+
885
+ - when ``utc=False`` (default) and the input is an array-like or
886
+ :class:`Series` containing mixed naive/aware datetime, or aware with mixed
887
+ time offsets. Note that this happens in the (quite frequent) situation when
888
+ the timezone has a daylight savings policy. In that case you may wish to
889
+ use ``utc=True``.
890
+
891
+ Examples
892
+ --------
893
+
894
+ **Handling various input formats**
895
+
896
+ Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
897
+ can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
898
+ 'ms', 'us', 'ns']) or plurals of the same
899
+
900
+ >>> df = pd.DataFrame({'year': [2015, 2016],
901
+ ... 'month': [2, 3],
902
+ ... 'day': [4, 5]})
903
+ >>> pd.to_datetime(df)
904
+ 0 2015-02-04
905
+ 1 2016-03-05
906
+ dtype: datetime64[ns]
907
+
908
+ Using a unix epoch time
909
+
910
+ >>> pd.to_datetime(1490195805, unit='s')
911
+ Timestamp('2017-03-22 15:16:45')
912
+ >>> pd.to_datetime(1490195805433502912, unit='ns')
913
+ Timestamp('2017-03-22 15:16:45.433502912')
914
+
915
+ .. warning:: For float arg, precision rounding might happen. To prevent
916
+ unexpected behavior use a fixed-width exact type.
917
+
918
+ Using a non-unix epoch origin
919
+
920
+ >>> pd.to_datetime([1, 2, 3], unit='D',
921
+ ... origin=pd.Timestamp('1960-01-01'))
922
+ DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
923
+ dtype='datetime64[ns]', freq=None)
924
+
925
+ **Differences with strptime behavior**
926
+
927
+ :const:`"%f"` will parse all the way up to nanoseconds.
928
+
929
+ >>> pd.to_datetime('2018-10-26 12:00:00.0000000011',
930
+ ... format='%Y-%m-%d %H:%M:%S.%f')
931
+ Timestamp('2018-10-26 12:00:00.000000001')
932
+
933
+ **Non-convertible date/times**
934
+
935
+ Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
936
+ in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
937
+
938
+ >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
939
+ NaT
940
+
941
+ .. _to_datetime_tz_examples:
942
+
943
+ **Timezones and time offsets**
944
+
945
+ The default behaviour (``utc=False``) is as follows:
946
+
947
+ - Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
948
+
949
+ >>> pd.to_datetime(['2018-10-26 12:00:00', '2018-10-26 13:00:15'])
950
+ DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
951
+ dtype='datetime64[ns]', freq=None)
952
+
953
+ - Timezone-aware inputs *with constant time offset* are converted to
954
+ timezone-aware :class:`DatetimeIndex`:
955
+
956
+ >>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
957
+ DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
958
+ dtype='datetime64[ns, UTC-05:00]', freq=None)
959
+
960
+ - However, timezone-aware inputs *with mixed time offsets* (for example
961
+ issued from a timezone with daylight savings, such as Europe/Paris)
962
+ are **not successfully converted** to a :class:`DatetimeIndex`.
963
+ Parsing datetimes with mixed time zones will show a warning unless
964
+ `utc=True`. If you specify `utc=False` the warning below will be shown
965
+ and a simple :class:`Index` containing :class:`datetime.datetime`
966
+ objects will be returned:
967
+
968
+ >>> pd.to_datetime(['2020-10-25 02:00 +0200',
969
+ ... '2020-10-25 04:00 +0100']) # doctest: +SKIP
970
+ FutureWarning: In a future version of pandas, parsing datetimes with mixed
971
+ time zones will raise an error unless `utc=True`. Please specify `utc=True`
972
+ to opt in to the new behaviour and silence this warning. To create a `Series`
973
+ with mixed offsets and `object` dtype, please use `apply` and
974
+ `datetime.datetime.strptime`.
975
+ Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],
976
+ dtype='object')
977
+
978
+ - A mix of timezone-aware and timezone-naive inputs is also converted to
979
+ a simple :class:`Index` containing :class:`datetime.datetime` objects:
980
+
981
+ >>> from datetime import datetime
982
+ >>> pd.to_datetime(["2020-01-01 01:00:00-01:00",
983
+ ... datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP
984
+ FutureWarning: In a future version of pandas, parsing datetimes with mixed
985
+ time zones will raise an error unless `utc=True`. Please specify `utc=True`
986
+ to opt in to the new behaviour and silence this warning. To create a `Series`
987
+ with mixed offsets and `object` dtype, please use `apply` and
988
+ `datetime.datetime.strptime`.
989
+ Index([2020-01-01 01:00:00-01:00, 2020-01-01 03:00:00], dtype='object')
990
+
991
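+ As the warning above suggests, one hedged way to keep the mixed offsets is
+ ``apply`` with ``datetime.datetime.strptime`` (a sketch; ``%z`` accepts
+ offsets such as ``+0200``):
+
+ >>> pd.Series(['2020-10-25 02:00 +0200', '2020-10-25 04:00 +0100']).apply(
+ ... lambda s: datetime.strptime(s, '%Y-%m-%d %H:%M %z')) # doctest: +SKIP
+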
+ |
992
+
993
+ Setting ``utc=True`` solves most of the above issues:
994
+
995
+ - Timezone-naive inputs are *localized* as UTC
996
+
997
+ >>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
998
+ DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
999
+ dtype='datetime64[ns, UTC]', freq=None)
1000
+
1001
+ - Timezone-aware inputs are *converted* to UTC (the output represents the
1002
+ exact same datetime, but viewed from the UTC time offset `+00:00`).
1003
+
1004
+ >>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
1005
+ ... utc=True)
1006
+ DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
1007
+ dtype='datetime64[ns, UTC]', freq=None)
1008
+
1009
+ - Inputs can contain both strings and datetimes; the above
1010
+ rules still apply
1011
+
1012
+ >>> pd.to_datetime(['2018-10-26 12:00', datetime(2020, 1, 1, 18)], utc=True)
1013
+ DatetimeIndex(['2018-10-26 12:00:00+00:00', '2020-01-01 18:00:00+00:00'],
1014
+ dtype='datetime64[ns, UTC]', freq=None)
1015
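+
+ - If a specific timezone is wanted, a hedged follow-up is converting the
+ UTC result with :meth:`DatetimeIndex.tz_convert` (sketch):
+
+ >>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'],
+ ... utc=True).tz_convert('US/Pacific') # doctest: +SKIP
+ DatetimeIndex(['2018-10-26 05:00:00-07:00', '2018-10-26 06:00:00-07:00'],
+ dtype='datetime64[ns, US/Pacific]', freq=None)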
+ """
1016
+ if exact is not lib.no_default and format in {"mixed", "ISO8601"}:
1017
+ raise ValueError("Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'")
1018
+ if infer_datetime_format is not lib.no_default:
1019
+ warnings.warn(
1020
+ "The argument 'infer_datetime_format' is deprecated and will "
1021
+ "be removed in a future version. "
1022
+ "A strict version of it is now the default, see "
1023
+ "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
1024
+ "You can safely remove this argument.",
1025
+ stacklevel=find_stack_level(),
1026
+ )
1027
+ if errors == "ignore":
1028
+ # GH#54467
1029
+ warnings.warn(
1030
+ "errors='ignore' is deprecated and will raise in a future version. "
1031
+ "Use to_datetime without passing `errors` and catch exceptions "
1032
+ "explicitly instead",
1033
+ FutureWarning,
1034
+ stacklevel=find_stack_level(),
1035
+ )
1036
+
1037
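+ # A hedged sketch of the replacement pattern the warning above recommends
+ # (the literal "not-a-date" is illustrative only):
+ #
+ # try:
+ # ts = to_datetime("not-a-date")
+ # except (ValueError, TypeError):
+ # ts = "not-a-date" # keep the input, mimicking errors="ignore"
+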
+ if arg is None:
1038
+ return None
1039
+
1040
+ if origin != "unix":
1041
+ arg = _adjust_to_origin(arg, origin, unit)
1042
+
1043
+ convert_listlike = partial(
1044
+ _convert_listlike_datetimes,
1045
+ utc=utc,
1046
+ unit=unit,
1047
+ dayfirst=dayfirst,
1048
+ yearfirst=yearfirst,
1049
+ errors=errors,
1050
+ exact=exact,
1051
+ )
1052
+ # pylint: disable-next=used-before-assignment
1053
+ result: Timestamp | NaTType | Series | Index
1054
+
1055
+ if isinstance(arg, Timestamp):
1056
+ result = arg
1057
+ if utc:
1058
+ if arg.tz is not None:
1059
+ result = arg.tz_convert("utc")
1060
+ else:
1061
+ result = arg.tz_localize("utc")
1062
+ elif isinstance(arg, ABCSeries):
1063
+ cache_array = _maybe_cache(arg, format, cache, convert_listlike)
1064
+ if not cache_array.empty:
1065
+ result = arg.map(cache_array)
1066
+ else:
1067
+ values = convert_listlike(arg._values, format)
1068
+ result = arg._constructor(values, index=arg.index, name=arg.name)
1069
+ elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
1070
+ result = _assemble_from_unit_mappings(arg, errors, utc)
1071
+ elif isinstance(arg, Index):
1072
+ cache_array = _maybe_cache(arg, format, cache, convert_listlike)
1073
+ if not cache_array.empty:
1074
+ result = _convert_and_box_cache(arg, cache_array, name=arg.name)
1075
+ else:
1076
+ result = convert_listlike(arg, format, name=arg.name)
1077
+ elif is_list_like(arg):
1078
+ try:
1079
+ # error: Argument 1 to "_maybe_cache" has incompatible type
1080
+ # "Union[float, str, datetime, List[Any], Tuple[Any, ...], ExtensionArray,
1081
+ # ndarray[Any, Any], Series]"; expected "Union[List[Any], Tuple[Any, ...],
1082
+ # Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], Series]"
1083
+ argc = cast(
1084
+ Union[list, tuple, ExtensionArray, np.ndarray, "Series", Index], arg
1085
+ )
1086
+ cache_array = _maybe_cache(argc, format, cache, convert_listlike)
1087
+ except OutOfBoundsDatetime:
1088
+ # caching attempts to create a DatetimeIndex, which may raise
1089
+ # an OOB. If that's the desired behavior, then just reraise...
1090
+ if errors == "raise":
1091
+ raise
1092
+ # ... otherwise, continue without the cache.
1093
+ from pandas import Series
1094
+
1095
+ cache_array = Series([], dtype=object) # just an empty Series
1096
+ if not cache_array.empty:
1097
+ result = _convert_and_box_cache(argc, cache_array)
1098
+ else:
1099
+ result = convert_listlike(argc, format)
1100
+ else:
1101
+ result = convert_listlike(np.array([arg]), format)[0]
1102
+ if isinstance(arg, bool) and isinstance(result, np.bool_):
1103
+ result = bool(result) # TODO: avoid this kludge.
1104
+
1105
+ # error: Incompatible return value type (got "Union[Timestamp, NaTType,
1106
+ # Series, Index]", expected "Union[DatetimeIndex, Series, float, str,
1107
+ # NaTType, None]")
1108
+ return result # type: ignore[return-value]
1109
+
1110
+
1111
+ # mappings for assembling units
1112
+ _unit_map = {
1113
+ "year": "year",
1114
+ "years": "year",
1115
+ "month": "month",
1116
+ "months": "month",
1117
+ "day": "day",
1118
+ "days": "day",
1119
+ "hour": "h",
1120
+ "hours": "h",
1121
+ "minute": "m",
1122
+ "minutes": "m",
1123
+ "second": "s",
1124
+ "seconds": "s",
1125
+ "ms": "ms",
1126
+ "millisecond": "ms",
1127
+ "milliseconds": "ms",
1128
+ "us": "us",
1129
+ "microsecond": "us",
1130
+ "microseconds": "us",
1131
+ "ns": "ns",
1132
+ "nanosecond": "ns",
1133
+ "nanoseconds": "ns",
1134
+ }
1135
+
1136
+
1137
+ def _assemble_from_unit_mappings(arg, errors: DateTimeErrorChoices, utc: bool):
1138
+ """
1139
+ assemble the unit specified fields from the arg (DataFrame)
1140
+ Return a Series for actual parsing
1141
+
1142
+ Parameters
1143
+ ----------
1144
+ arg : DataFrame
1145
+ errors : {'ignore', 'raise', 'coerce'}, default 'raise'
1146
+
1147
+ - If :const:`'raise'`, then invalid parsing will raise an exception
1148
+ - If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`
1149
+ - If :const:`'ignore'`, then invalid parsing will return the input
1150
+ utc : bool
1151
+ Whether to convert/localize timestamps to UTC.
1152
+
1153
+ Returns
1154
+ -------
1155
+ Series
1156
+ """
1157
+ from pandas import (
1158
+ DataFrame,
1159
+ to_numeric,
1160
+ to_timedelta,
1161
+ )
1162
+
1163
+ arg = DataFrame(arg)
1164
+ if not arg.columns.is_unique:
1165
+ raise ValueError("cannot assemble with duplicate keys")
1166
+
1167
+ # replace passed unit with _unit_map
1168
+ def f(value):
1169
+ if value in _unit_map:
1170
+ return _unit_map[value]
1171
+
1172
+ # "m" is case significant, hence the exact match above is tried first
1173
+ if value.lower() in _unit_map:
1174
+ return _unit_map[value.lower()]
1175
+
1176
+ return value
1177
+
1178
+ unit = {k: f(k) for k in arg.keys()}
1179
+ unit_rev = {v: k for k, v in unit.items()}
1180
+
1181
+ # we require at least Ymd
1182
+ required = ["year", "month", "day"]
1183
+ req = sorted(set(required) - set(unit_rev.keys()))
1184
+ if len(req):
1185
+ _required = ",".join(req)
1186
+ raise ValueError(
1187
+ "to assemble mappings requires at least that "
1188
+ f"[year, month, day] be specified: [{_required}] is missing"
1189
+ )
1190
+
1191
+ # keys we don't recognize
1192
+ excess = sorted(set(unit_rev.keys()) - set(_unit_map.values()))
1193
+ if len(excess):
1194
+ _excess = ",".join(excess)
1195
+ raise ValueError(
1196
+ f"extra keys have been passed to the datetime assemblage: [{_excess}]"
1197
+ )
1198
+
1199
+ def coerce(values):
1200
+ # we allow coercion if errors allows it
1201
+ values = to_numeric(values, errors=errors)
1202
+
1203
+ # prevent overflow in case of int8 or int16
1204
+ if is_integer_dtype(values.dtype):
1205
+ values = values.astype("int64", copy=False)
1206
+ return values
1207
+
1208
+ values = (
1209
+ coerce(arg[unit_rev["year"]]) * 10000
1210
+ + coerce(arg[unit_rev["month"]]) * 100
1211
+ + coerce(arg[unit_rev["day"]])
1212
+ )
1213
+ try:
1214
+ values = to_datetime(values, format="%Y%m%d", errors=errors, utc=utc)
1215
+ except (TypeError, ValueError) as err:
1216
+ raise ValueError(f"cannot assemble the datetimes: {err}") from err
1217
+
1218
+ units: list[UnitChoices] = ["h", "m", "s", "ms", "us", "ns"]
1219
+ for u in units:
1220
+ value = unit_rev.get(u)
1221
+ if value is not None and value in arg:
1222
+ try:
1223
+ values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
1224
+ except (TypeError, ValueError) as err:
1225
+ raise ValueError(
1226
+ f"cannot assemble the datetimes [{value}]: {err}"
1227
+ ) from err
1228
+ return values
1229
+
1230
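+ # For illustration, the documented DataFrame-assembly behaviour that the
+ # helper above implements (column names are canonicalised via _unit_map):
+ #
+ # >>> df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
+ # >>> pd.to_datetime(df)
+ # 0 2015-02-04
+ # 1 2016-03-05
+ # dtype: datetime64[ns]
+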
+
1231
+ __all__ = [
1232
+ "DateParseError",
1233
+ "should_cache",
1234
+ "to_datetime",
1235
+ ]
env-llmeval/lib/python3.10/site-packages/pandas/core/tools/numeric.py ADDED
@@ -0,0 +1,329 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Literal,
6
+ )
7
+ import warnings
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs import lib
12
+ from pandas.util._exceptions import find_stack_level
13
+ from pandas.util._validators import check_dtype_backend
14
+
15
+ from pandas.core.dtypes.cast import maybe_downcast_numeric
16
+ from pandas.core.dtypes.common import (
17
+ ensure_object,
18
+ is_bool_dtype,
19
+ is_decimal,
20
+ is_integer_dtype,
21
+ is_number,
22
+ is_numeric_dtype,
23
+ is_scalar,
24
+ is_string_dtype,
25
+ needs_i8_conversion,
26
+ )
27
+ from pandas.core.dtypes.dtypes import ArrowDtype
28
+ from pandas.core.dtypes.generic import (
29
+ ABCIndex,
30
+ ABCSeries,
31
+ )
32
+
33
+ from pandas.core.arrays import BaseMaskedArray
34
+ from pandas.core.arrays.string_ import StringDtype
35
+
36
+ if TYPE_CHECKING:
37
+ from pandas._typing import (
38
+ DateTimeErrorChoices,
39
+ DtypeBackend,
40
+ npt,
41
+ )
42
+
43
+
44
+ def to_numeric(
45
+ arg,
46
+ errors: DateTimeErrorChoices = "raise",
47
+ downcast: Literal["integer", "signed", "unsigned", "float"] | None = None,
48
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
49
+ ):
50
+ """
51
+ Convert argument to a numeric type.
52
+
53
+ The default return dtype is `float64` or `int64`
54
+ depending on the data supplied. Use the `downcast` parameter
55
+ to obtain other dtypes.
56
+
57
+ Please note that precision loss may occur if really large numbers
58
+ are passed in. Due to the internal limitations of `ndarray`, if
59
+ numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
60
+ or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
61
+ passed in, it is very likely they will be converted to float so that
62
+ they can be stored in an `ndarray`. These warnings apply similarly to
63
+ `Series` since it internally leverages `ndarray`.
64
+
65
+ Parameters
66
+ ----------
67
+ arg : scalar, list, tuple, 1-d array, or Series
68
+ Argument to be converted.
69
+ errors : {'ignore', 'raise', 'coerce'}, default 'raise'
70
+ - If 'raise', then invalid parsing will raise an exception.
71
+ - If 'coerce', then invalid parsing will be set as NaN.
72
+ - If 'ignore', then invalid parsing will return the input.
73
+
74
+ .. versionchanged:: 2.2
75
+
76
+ "ignore" is deprecated. Catch exceptions explicitly instead.
77
+
78
+ downcast : str, default None
79
+ Can be 'integer', 'signed', 'unsigned', or 'float'.
80
+ If not None, and if the data has been successfully cast to a
81
+ numerical dtype (or if the data was numeric to begin with),
82
+ downcast that resulting data to the smallest numerical dtype
83
+ possible according to the following rules:
84
+
85
+ - 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
86
+ - 'unsigned': smallest unsigned int dtype (min.: np.uint8)
87
+ - 'float': smallest float dtype (min.: np.float32)
88
+
89
+ As this behaviour is separate from the core conversion to
90
+ numeric values, any errors raised during the downcasting
91
+ will be surfaced regardless of the value of the 'errors' input.
92
+
93
+ In addition, downcasting will only occur if the size
94
+ of the resulting data's dtype is strictly larger than
95
+ the dtype it is to be cast to, so if none of the dtypes
96
+ checked satisfy that specification, no downcasting will be
97
+ performed on the data.
98
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
99
+ Back-end data type applied to the result
100
+ (still experimental). Behaviour is as follows:
101
+
102
+ * ``"numpy_nullable"``: returns a nullable-dtype-backed result
103
+ (default).
104
+ * ``"pyarrow"``: returns a pyarrow-backed nullable :class:`ArrowDtype`
105
+ result.
106
+
107
+ .. versionadded:: 2.0
108
+
109
+ Returns
110
+ -------
111
+ ret
112
+ Numeric if parsing succeeded.
113
+ Return type depends on input. Series if Series, otherwise ndarray.
114
+
115
+ See Also
116
+ --------
117
+ DataFrame.astype : Cast argument to a specified dtype.
118
+ to_datetime : Convert argument to datetime.
119
+ to_timedelta : Convert argument to timedelta.
120
+ numpy.ndarray.astype : Cast a numpy array to a specified type.
121
+ DataFrame.convert_dtypes : Convert dtypes.
122
+
123
+ Examples
124
+ --------
125
+ Take separate series and convert to numeric, coercing when told to
126
+
127
+ >>> s = pd.Series(['1.0', '2', -3])
128
+ >>> pd.to_numeric(s)
129
+ 0 1.0
130
+ 1 2.0
131
+ 2 -3.0
132
+ dtype: float64
133
+ >>> pd.to_numeric(s, downcast='float')
134
+ 0 1.0
135
+ 1 2.0
136
+ 2 -3.0
137
+ dtype: float32
138
+ >>> pd.to_numeric(s, downcast='signed')
139
+ 0 1
140
+ 1 2
141
+ 2 -3
142
+ dtype: int8
143
+ >>> s = pd.Series(['apple', '1.0', '2', -3])
144
+ >>> pd.to_numeric(s, errors='coerce')
145
+ 0 NaN
146
+ 1 1.0
147
+ 2 2.0
148
+ 3 -3.0
149
+ dtype: float64
150
+
151
+ Downcasting of nullable integer and floating dtypes is supported:
152
+
153
+ >>> s = pd.Series([1, 2, 3], dtype="Int64")
154
+ >>> pd.to_numeric(s, downcast="integer")
155
+ 0 1
156
+ 1 2
157
+ 2 3
158
+ dtype: Int8
159
+ >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
160
+ >>> pd.to_numeric(s, downcast="float")
161
+ 0 1.0
162
+ 1 2.1
163
+ 2 3.0
164
+ dtype: Float32
165
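+
+ A hedged sketch of ``dtype_backend`` (the doctest is skipped since the
+ exact repr may differ):
+
+ >>> pd.to_numeric(pd.Series(["1", "2", None]),
+ ... dtype_backend="numpy_nullable") # doctest: +SKIP
+ 0 1
+ 1 2
+ 2 <NA>
+ dtype: Int64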
+ """
166
+ if downcast not in (None, "integer", "signed", "unsigned", "float"):
167
+ raise ValueError("invalid downcasting method provided")
168
+
169
+ if errors not in ("ignore", "raise", "coerce"):
170
+ raise ValueError("invalid error value specified")
171
+ if errors == "ignore":
172
+ # GH#54467
173
+ warnings.warn(
174
+ "errors='ignore' is deprecated and will raise in a future version. "
175
+ "Use to_numeric without passing `errors` and catch exceptions "
176
+ "explicitly instead",
177
+ FutureWarning,
178
+ stacklevel=find_stack_level(),
179
+ )
180
+
181
+ check_dtype_backend(dtype_backend)
182
+
183
+ is_series = False
184
+ is_index = False
185
+ is_scalars = False
186
+
187
+ if isinstance(arg, ABCSeries):
188
+ is_series = True
189
+ values = arg.values
190
+ elif isinstance(arg, ABCIndex):
191
+ is_index = True
192
+ if needs_i8_conversion(arg.dtype):
193
+ values = arg.view("i8")
194
+ else:
195
+ values = arg.values
196
+ elif isinstance(arg, (list, tuple)):
197
+ values = np.array(arg, dtype="O")
198
+ elif is_scalar(arg):
199
+ if is_decimal(arg):
200
+ return float(arg)
201
+ if is_number(arg):
202
+ return arg
203
+ is_scalars = True
204
+ values = np.array([arg], dtype="O")
205
+ elif getattr(arg, "ndim", 1) > 1:
206
+ raise TypeError("arg must be a list, tuple, 1-d array, or Series")
207
+ else:
208
+ values = arg
209
+
210
+ orig_values = values
211
+
212
+ # GH33013: for IntegerArray & FloatingArray extract non-null values for casting
213
+ # save mask to reconstruct the full array after casting
214
+ mask: npt.NDArray[np.bool_] | None = None
215
+ if isinstance(values, BaseMaskedArray):
216
+ mask = values._mask
217
+ values = values._data[~mask]
218
+
219
+ values_dtype = getattr(values, "dtype", None)
220
+ if isinstance(values_dtype, ArrowDtype):
221
+ mask = values.isna()
222
+ values = values.dropna().to_numpy()
223
+ new_mask: np.ndarray | None = None
224
+ if is_numeric_dtype(values_dtype):
225
+ pass
226
+ elif lib.is_np_dtype(values_dtype, "mM"):
227
+ values = values.view(np.int64)
228
+ else:
229
+ values = ensure_object(values)
230
+ coerce_numeric = errors not in ("ignore", "raise")
231
+ try:
232
+ values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload]
233
+ values,
234
+ set(),
235
+ coerce_numeric=coerce_numeric,
236
+ convert_to_masked_nullable=dtype_backend is not lib.no_default
237
+ or isinstance(values_dtype, StringDtype)
238
+ and not values_dtype.storage == "pyarrow_numpy",
239
+ )
240
+ except (ValueError, TypeError):
241
+ if errors == "raise":
242
+ raise
243
+ values = orig_values
244
+
245
+ if new_mask is not None:
246
+ # Remove the values masked as missing; this is expected later anyway
247
+ # and enables downcasting
248
+ values = values[~new_mask]
249
+ elif (
250
+ dtype_backend is not lib.no_default
251
+ and new_mask is None
252
+ or isinstance(values_dtype, StringDtype)
253
+ and not values_dtype.storage == "pyarrow_numpy"
254
+ ):
255
+ new_mask = np.zeros(values.shape, dtype=np.bool_)
256
+
257
+ # attempt downcast only if the data has been successfully converted
258
+ # to a numerical dtype and if a downcast method has been specified
259
+ if downcast is not None and is_numeric_dtype(values.dtype):
260
+ typecodes: str | None = None
261
+
262
+ if downcast in ("integer", "signed"):
263
+ typecodes = np.typecodes["Integer"]
264
+ elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
265
+ typecodes = np.typecodes["UnsignedInteger"]
266
+ elif downcast == "float":
267
+ typecodes = np.typecodes["Float"]
268
+
269
+ # pandas support goes only to np.float32,
270
+ # as float dtypes smaller than that are
271
+ # extremely rare and not well supported
272
+ float_32_char = np.dtype(np.float32).char
273
+ float_32_ind = typecodes.index(float_32_char)
274
+ typecodes = typecodes[float_32_ind:]
275
+
276
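+ # For reference, np.typecodes["Integer"] is "bhilqp" and
+ # np.typecodes["Float"] is "efdg"; both run from smallest itemsize to
+ # largest, which the smallest-to-largest scan below relies on.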
+ if typecodes is not None:
277
+ # from smallest to largest
278
+ for typecode in typecodes:
279
+ dtype = np.dtype(typecode)
280
+ if dtype.itemsize <= values.dtype.itemsize:
281
+ values = maybe_downcast_numeric(values, dtype)
282
+
283
+ # successful conversion
284
+ if values.dtype == dtype:
285
+ break
286
+
287
+ # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct
288
+ # masked array
289
+ if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype):
290
+ if mask is None or (new_mask is not None and new_mask.shape == mask.shape):
291
+ # GH 52588
292
+ mask = new_mask
293
+ else:
294
+ mask = mask.copy()
295
+ assert isinstance(mask, np.ndarray)
296
+ data = np.zeros(mask.shape, dtype=values.dtype)
297
+ data[~mask] = values
298
+
299
+ from pandas.core.arrays import (
300
+ ArrowExtensionArray,
301
+ BooleanArray,
302
+ FloatingArray,
303
+ IntegerArray,
304
+ )
305
+
306
+ klass: type[IntegerArray | BooleanArray | FloatingArray]
307
+ if is_integer_dtype(data.dtype):
308
+ klass = IntegerArray
309
+ elif is_bool_dtype(data.dtype):
310
+ klass = BooleanArray
311
+ else:
312
+ klass = FloatingArray
313
+ values = klass(data, mask)
314
+
315
+ if dtype_backend == "pyarrow" or isinstance(values_dtype, ArrowDtype):
316
+ values = ArrowExtensionArray(values.__arrow_array__())
317
+
318
+ if is_series:
319
+ return arg._constructor(values, index=arg.index, name=arg.name)
320
+ elif is_index:
321
+ # because we want to coerce to numeric if possible,
322
+ # do not use _shallow_copy
323
+ from pandas import Index
324
+
325
+ return Index(values, name=arg.name)
326
+ elif is_scalars:
327
+ return values[0]
328
+ else:
329
+ return values
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__init__.py ADDED
@@ -0,0 +1,23 @@
1
+ from pandas.core.window.ewm import (
2
+ ExponentialMovingWindow,
3
+ ExponentialMovingWindowGroupby,
4
+ )
5
+ from pandas.core.window.expanding import (
6
+ Expanding,
7
+ ExpandingGroupby,
8
+ )
9
+ from pandas.core.window.rolling import (
10
+ Rolling,
11
+ RollingGroupby,
12
+ Window,
13
+ )
14
+
15
+ __all__ = [
16
+ "Expanding",
17
+ "ExpandingGroupby",
18
+ "ExponentialMovingWindow",
19
+ "ExponentialMovingWindowGroupby",
20
+ "Rolling",
21
+ "RollingGroupby",
22
+ "Window",
23
+ ]
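+
+ # For illustration, these classes back the .rolling()/.expanding()/.ewm()
+ # accessors on Series and DataFrame; a minimal sketch:
+ #
+ # >>> import pandas as pd
+ # >>> pd.Series(range(5)).rolling(window=2).mean()
+ # 0 NaN
+ # 1 0.5
+ # 2 1.5
+ # 3 2.5
+ # 4 3.5
+ # dtype: float64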
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (539 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/common.cpython-310.pyc ADDED
Binary file (4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/doc.cpython-310.pyc ADDED
Binary file (4.44 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/ewm.cpython-310.pyc ADDED
Binary file (27.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/expanding.cpython-310.pyc ADDED
Binary file (20.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/numba_.cpython-310.pyc ADDED
Binary file (7.58 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/core/window/__pycache__/online.cpython-310.pyc ADDED
Binary file (3.17 kB). View file