applied-ai-018 committed
Commit 70e71e5 · verified · 1 Parent(s): 1265bcd

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py +7 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py +66 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py +473 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py +174 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py +19 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py +414 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py +207 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__init__.py +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/buffer.py +136 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/column.py +461 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe.py +113 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe_protocol.py +465 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/from_dataframe.py +526 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/utils.py +178 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__init__.py +93 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/array_ops.py +604 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/core/ops/common.py +146 -0
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.36 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc ADDED
Binary file (4.84 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.4 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc ADDED
Binary file (81.2 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc ADDED
Binary file (4.69 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc ADDED
Binary file (41.4 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc ADDED
Binary file (7.39 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc ADDED
Binary file (12.5 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc ADDED
Binary file (32.7 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc ADDED
Binary file (20.5 kB).
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py ADDED
@@ -0,0 +1,7 @@
+from pandas.core.arrays.arrow.accessors import (
+    ListAccessor,
+    StructAccessor,
+)
+from pandas.core.arrays.arrow.array import ArrowExtensionArray
+
+__all__ = ["ArrowExtensionArray", "StructAccessor", "ListAccessor"]
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (395 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc ADDED
Binary file (2.06 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc ADDED
Binary file (13.3 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc ADDED
Binary file (82.6 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc ADDED
Binary file (6.29 kB).
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py ADDED
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import warnings
+
+import numpy as np
+import pyarrow
+
+from pandas.errors import PerformanceWarning
+from pandas.util._exceptions import find_stack_level
+
+
+def fallback_performancewarning(version: str | None = None) -> None:
+    """
+    Raise a PerformanceWarning for falling back to ExtensionArray's
+    non-pyarrow method
+    """
+    msg = "Falling back on a non-pyarrow code path which may decrease performance."
+    if version is not None:
+        msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning."
+    warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
+
+
+def pyarrow_array_to_numpy_and_mask(
+    arr, dtype: np.dtype
+) -> tuple[np.ndarray, np.ndarray]:
+    """
+    Convert a primitive pyarrow.Array to a numpy array and boolean mask based
+    on the buffers of the Array.
+
+    At the moment pyarrow.BooleanArray is not supported.
+
+    Parameters
+    ----------
+    arr : pyarrow.Array
+    dtype : numpy.dtype
+
+    Returns
+    -------
+    (data, mask)
+        Tuple of two numpy arrays with the raw data (with specified dtype) and
+        a boolean mask (validity mask, so False means missing)
+    """
+    dtype = np.dtype(dtype)
+
+    if pyarrow.types.is_null(arr.type):
+        # No initialization of data is needed since everything is null
+        data = np.empty(len(arr), dtype=dtype)
+        mask = np.zeros(len(arr), dtype=bool)
+        return data, mask
+    buflist = arr.buffers()
+    # Since Arrow buffers might contain padding and the data might be offset,
+    # the buffer gets sliced here before handing it to numpy.
+    # See also https://github.com/pandas-dev/pandas/issues/40896
+    offset = arr.offset * dtype.itemsize
+    length = len(arr) * dtype.itemsize
+    data_buf = buflist[1][offset : offset + length]
+    data = np.frombuffer(data_buf, dtype=dtype)
+    bitmask = buflist[0]
+    if bitmask is not None:
+        mask = pyarrow.BooleanArray.from_buffers(
+            pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset
+        )
+        mask = np.asarray(mask)
+    else:
+        mask = np.ones(len(arr), dtype=bool)
+    return data, mask
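For orientation, a minimal sketch of how the helper added above behaves, assuming numpy and pyarrow are installed; the sample array is illustrative only:

    import numpy as np
    import pyarrow as pa

    from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask

    # A primitive pyarrow array with one missing value.
    arr = pa.array([1, None, 3], type=pa.int64())

    # data is a raw view of the value buffer with the requested dtype;
    # mask is a validity mask, so False marks the missing slot.
    data, mask = pyarrow_array_to_numpy_and_mask(arr, np.dtype("int64"))
    print(data.dtype)  # int64
    print(mask)        # [ True False  True]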
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py ADDED
@@ -0,0 +1,473 @@
1
+ """Accessors for arrow-backed data."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from abc import (
6
+ ABCMeta,
7
+ abstractmethod,
8
+ )
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ cast,
12
+ )
13
+
14
+ from pandas.compat import (
15
+ pa_version_under10p1,
16
+ pa_version_under11p0,
17
+ )
18
+
19
+ from pandas.core.dtypes.common import is_list_like
20
+
21
+ if not pa_version_under10p1:
22
+ import pyarrow as pa
23
+ import pyarrow.compute as pc
24
+
25
+ from pandas.core.dtypes.dtypes import ArrowDtype
26
+
27
+ if TYPE_CHECKING:
28
+ from collections.abc import Iterator
29
+
30
+ from pandas import (
31
+ DataFrame,
32
+ Series,
33
+ )
34
+
35
+
36
+ class ArrowAccessor(metaclass=ABCMeta):
37
+ @abstractmethod
38
+ def __init__(self, data, validation_msg: str) -> None:
39
+ self._data = data
40
+ self._validation_msg = validation_msg
41
+ self._validate(data)
42
+
43
+ @abstractmethod
44
+ def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
45
+ pass
46
+
47
+ def _validate(self, data):
48
+ dtype = data.dtype
49
+ if not isinstance(dtype, ArrowDtype):
50
+ # Raise AttributeError so that inspect can handle non-struct Series.
51
+ raise AttributeError(self._validation_msg.format(dtype=dtype))
52
+
53
+ if not self._is_valid_pyarrow_dtype(dtype.pyarrow_dtype):
54
+ # Raise AttributeError so that inspect can handle invalid Series.
55
+ raise AttributeError(self._validation_msg.format(dtype=dtype))
56
+
57
+ @property
58
+ def _pa_array(self):
59
+ return self._data.array._pa_array
60
+
61
+
62
+ class ListAccessor(ArrowAccessor):
63
+ """
64
+ Accessor object for list data properties of the Series values.
65
+
66
+ Parameters
67
+ ----------
68
+ data : Series
69
+ Series containing Arrow list data.
70
+ """
71
+
72
+ def __init__(self, data=None) -> None:
73
+ super().__init__(
74
+ data,
75
+ validation_msg="Can only use the '.list' accessor with "
76
+ "'list[pyarrow]' dtype, not {dtype}.",
77
+ )
78
+
79
+ def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
80
+ return (
81
+ pa.types.is_list(pyarrow_dtype)
82
+ or pa.types.is_fixed_size_list(pyarrow_dtype)
83
+ or pa.types.is_large_list(pyarrow_dtype)
84
+ )
85
+
86
+ def len(self) -> Series:
87
+ """
88
+ Return the length of each list in the Series.
89
+
90
+ Returns
91
+ -------
92
+ pandas.Series
93
+ The length of each list.
94
+
95
+ Examples
96
+ --------
97
+ >>> import pyarrow as pa
98
+ >>> s = pd.Series(
99
+ ... [
100
+ ... [1, 2, 3],
101
+ ... [3],
102
+ ... ],
103
+ ... dtype=pd.ArrowDtype(pa.list_(
104
+ ... pa.int64()
105
+ ... ))
106
+ ... )
107
+ >>> s.list.len()
108
+ 0 3
109
+ 1 1
110
+ dtype: int32[pyarrow]
111
+ """
112
+ from pandas import Series
113
+
114
+ value_lengths = pc.list_value_length(self._pa_array)
115
+ return Series(value_lengths, dtype=ArrowDtype(value_lengths.type))
116
+
117
+ def __getitem__(self, key: int | slice) -> Series:
118
+ """
119
+ Index or slice lists in the Series.
120
+
121
+ Parameters
122
+ ----------
123
+ key : int | slice
124
+ Index or slice of indices to access from each list.
125
+
126
+ Returns
127
+ -------
128
+ pandas.Series
129
+ The list at requested index.
130
+
131
+ Examples
132
+ --------
133
+ >>> import pyarrow as pa
134
+ >>> s = pd.Series(
135
+ ... [
136
+ ... [1, 2, 3],
137
+ ... [3],
138
+ ... ],
139
+ ... dtype=pd.ArrowDtype(pa.list_(
140
+ ... pa.int64()
141
+ ... ))
142
+ ... )
143
+ >>> s.list[0]
144
+ 0 1
145
+ 1 3
146
+ dtype: int64[pyarrow]
147
+ """
148
+ from pandas import Series
149
+
150
+ if isinstance(key, int):
151
+ # TODO: Support negative key but pyarrow does not allow
152
+ # element index to be an array.
153
+ # if key < 0:
154
+ # key = pc.add(key, pc.list_value_length(self._pa_array))
155
+ element = pc.list_element(self._pa_array, key)
156
+ return Series(element, dtype=ArrowDtype(element.type))
157
+ elif isinstance(key, slice):
158
+ if pa_version_under11p0:
159
+ raise NotImplementedError(
160
+ f"List slice not supported by pyarrow {pa.__version__}."
161
+ )
162
+
163
+ # TODO: Support negative start/stop/step, ideally this would be added
164
+ # upstream in pyarrow.
165
+ start, stop, step = key.start, key.stop, key.step
166
+ if start is None:
167
+ # TODO: When adding negative step support
168
+ # this should be set to the last element of the array
169
+ # when step is negative.
170
+ start = 0
171
+ if step is None:
172
+ step = 1
173
+ sliced = pc.list_slice(self._pa_array, start, stop, step)
174
+ return Series(sliced, dtype=ArrowDtype(sliced.type))
175
+ else:
176
+ raise ValueError(f"key must be an int or slice, got {type(key).__name__}")
177
+
178
+ def __iter__(self) -> Iterator:
179
+ raise TypeError(f"'{type(self).__name__}' object is not iterable")
180
+
181
+ def flatten(self) -> Series:
182
+ """
183
+ Flatten list values.
184
+
185
+ Returns
186
+ -------
187
+ pandas.Series
188
+ The data from all lists in the series flattened.
189
+
190
+ Examples
191
+ --------
192
+ >>> import pyarrow as pa
193
+ >>> s = pd.Series(
194
+ ... [
195
+ ... [1, 2, 3],
196
+ ... [3],
197
+ ... ],
198
+ ... dtype=pd.ArrowDtype(pa.list_(
199
+ ... pa.int64()
200
+ ... ))
201
+ ... )
202
+ >>> s.list.flatten()
203
+ 0 1
204
+ 1 2
205
+ 2 3
206
+ 3 3
207
+ dtype: int64[pyarrow]
208
+ """
209
+ from pandas import Series
210
+
211
+ flattened = pc.list_flatten(self._pa_array)
212
+ return Series(flattened, dtype=ArrowDtype(flattened.type))
213
+
214
+
215
+ class StructAccessor(ArrowAccessor):
216
+ """
217
+ Accessor object for structured data properties of the Series values.
218
+
219
+ Parameters
220
+ ----------
221
+ data : Series
222
+ Series containing Arrow struct data.
223
+ """
224
+
225
+ def __init__(self, data=None) -> None:
226
+ super().__init__(
227
+ data,
228
+ validation_msg=(
229
+ "Can only use the '.struct' accessor with 'struct[pyarrow]' "
230
+ "dtype, not {dtype}."
231
+ ),
232
+ )
233
+
234
+ def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
235
+ return pa.types.is_struct(pyarrow_dtype)
236
+
237
+ @property
238
+ def dtypes(self) -> Series:
239
+ """
240
+ Return the dtype object of each child field of the struct.
241
+
242
+ Returns
243
+ -------
244
+ pandas.Series
245
+ The data type of each child field.
246
+
247
+ Examples
248
+ --------
249
+ >>> import pyarrow as pa
250
+ >>> s = pd.Series(
251
+ ... [
252
+ ... {"version": 1, "project": "pandas"},
253
+ ... {"version": 2, "project": "pandas"},
254
+ ... {"version": 1, "project": "numpy"},
255
+ ... ],
256
+ ... dtype=pd.ArrowDtype(pa.struct(
257
+ ... [("version", pa.int64()), ("project", pa.string())]
258
+ ... ))
259
+ ... )
260
+ >>> s.struct.dtypes
261
+ version int64[pyarrow]
262
+ project string[pyarrow]
263
+ dtype: object
264
+ """
265
+ from pandas import (
266
+ Index,
267
+ Series,
268
+ )
269
+
270
+ pa_type = self._data.dtype.pyarrow_dtype
271
+ types = [ArrowDtype(struct.type) for struct in pa_type]
272
+ names = [struct.name for struct in pa_type]
273
+ return Series(types, index=Index(names))
274
+
275
+ def field(
276
+ self,
277
+ name_or_index: list[str]
278
+ | list[bytes]
279
+ | list[int]
280
+ | pc.Expression
281
+ | bytes
282
+ | str
283
+ | int,
284
+ ) -> Series:
285
+ """
286
+ Extract a child field of a struct as a Series.
287
+
288
+ Parameters
289
+ ----------
290
+ name_or_index : str | bytes | int | expression | list
291
+ Name or index of the child field to extract.
292
+
293
+ For list-like inputs, this will index into a nested
294
+ struct.
295
+
296
+ Returns
297
+ -------
298
+ pandas.Series
299
+ The data corresponding to the selected child field.
300
+
301
+ See Also
302
+ --------
303
+ Series.struct.explode : Return all child fields as a DataFrame.
304
+
305
+ Notes
306
+ -----
307
+ The name of the resulting Series will be set using the following
308
+ rules:
309
+
310
+ - For string, bytes, or integer `name_or_index` (or a list of these, for
311
+ a nested selection), the Series name is set to the selected
312
+ field's name.
313
+ - For a :class:`pyarrow.compute.Expression`, this is set to
314
+ the string form of the expression.
315
+ - For list-like `name_or_index`, the name will be set to the
316
+ name of the final field selected.
317
+
318
+ Examples
319
+ --------
320
+ >>> import pyarrow as pa
321
+ >>> s = pd.Series(
322
+ ... [
323
+ ... {"version": 1, "project": "pandas"},
324
+ ... {"version": 2, "project": "pandas"},
325
+ ... {"version": 1, "project": "numpy"},
326
+ ... ],
327
+ ... dtype=pd.ArrowDtype(pa.struct(
328
+ ... [("version", pa.int64()), ("project", pa.string())]
329
+ ... ))
330
+ ... )
331
+
332
+ Extract by field name.
333
+
334
+ >>> s.struct.field("project")
335
+ 0 pandas
336
+ 1 pandas
337
+ 2 numpy
338
+ Name: project, dtype: string[pyarrow]
339
+
340
+ Extract by field index.
341
+
342
+ >>> s.struct.field(0)
343
+ 0 1
344
+ 1 2
345
+ 2 1
346
+ Name: version, dtype: int64[pyarrow]
347
+
348
+ Or an expression
349
+
350
+ >>> import pyarrow.compute as pc
351
+ >>> s.struct.field(pc.field("project"))
352
+ 0 pandas
353
+ 1 pandas
354
+ 2 numpy
355
+ Name: project, dtype: string[pyarrow]
356
+
357
+ For nested struct types, you can pass a list of values to index
358
+ multiple levels:
359
+
360
+ >>> version_type = pa.struct([
361
+ ... ("major", pa.int64()),
362
+ ... ("minor", pa.int64()),
363
+ ... ])
364
+ >>> s = pd.Series(
365
+ ... [
366
+ ... {"version": {"major": 1, "minor": 5}, "project": "pandas"},
367
+ ... {"version": {"major": 2, "minor": 1}, "project": "pandas"},
368
+ ... {"version": {"major": 1, "minor": 26}, "project": "numpy"},
369
+ ... ],
370
+ ... dtype=pd.ArrowDtype(pa.struct(
371
+ ... [("version", version_type), ("project", pa.string())]
372
+ ... ))
373
+ ... )
374
+ >>> s.struct.field(["version", "minor"])
375
+ 0 5
376
+ 1 1
377
+ 2 26
378
+ Name: minor, dtype: int64[pyarrow]
379
+ >>> s.struct.field([0, 0])
380
+ 0 1
381
+ 1 2
382
+ 2 1
383
+ Name: major, dtype: int64[pyarrow]
384
+ """
385
+ from pandas import Series
386
+
387
+ def get_name(
388
+ level_name_or_index: list[str]
389
+ | list[bytes]
390
+ | list[int]
391
+ | pc.Expression
392
+ | bytes
393
+ | str
394
+ | int,
395
+ data: pa.ChunkedArray,
396
+ ):
397
+ if isinstance(level_name_or_index, int):
398
+ name = data.type.field(level_name_or_index).name
399
+ elif isinstance(level_name_or_index, (str, bytes)):
400
+ name = level_name_or_index
401
+ elif isinstance(level_name_or_index, pc.Expression):
402
+ name = str(level_name_or_index)
403
+ elif is_list_like(level_name_or_index):
404
+ # For nested input like [2, 1, 2]
405
+ # iteratively get the struct and field name. The last
406
+ # one is used for the name of the index.
407
+ level_name_or_index = list(reversed(level_name_or_index))
408
+ selected = data
409
+ while level_name_or_index:
410
+ # we need the cast, otherwise mypy complains about
411
+ # getting ints, bytes, or str here, which isn't possible.
412
+ level_name_or_index = cast(list, level_name_or_index)
413
+ name_or_index = level_name_or_index.pop()
414
+ name = get_name(name_or_index, selected)
415
+ selected = selected.type.field(selected.type.get_field_index(name))
416
+ name = selected.name
417
+ else:
418
+ raise ValueError(
419
+ "name_or_index must be an int, str, bytes, "
420
+ "pyarrow.compute.Expression, or list of those"
421
+ )
422
+ return name
423
+
424
+ pa_arr = self._data.array._pa_array
425
+ name = get_name(name_or_index, pa_arr)
426
+ field_arr = pc.struct_field(pa_arr, name_or_index)
427
+
428
+ return Series(
429
+ field_arr,
430
+ dtype=ArrowDtype(field_arr.type),
431
+ index=self._data.index,
432
+ name=name,
433
+ )
434
+
435
+ def explode(self) -> DataFrame:
436
+ """
437
+ Extract all child fields of a struct as a DataFrame.
438
+
439
+ Returns
440
+ -------
441
+ pandas.DataFrame
442
+ The data corresponding to all child fields.
443
+
444
+ See Also
445
+ --------
446
+ Series.struct.field : Return a single child field as a Series.
447
+
448
+ Examples
449
+ --------
450
+ >>> import pyarrow as pa
451
+ >>> s = pd.Series(
452
+ ... [
453
+ ... {"version": 1, "project": "pandas"},
454
+ ... {"version": 2, "project": "pandas"},
455
+ ... {"version": 1, "project": "numpy"},
456
+ ... ],
457
+ ... dtype=pd.ArrowDtype(pa.struct(
458
+ ... [("version", pa.int64()), ("project", pa.string())]
459
+ ... ))
460
+ ... )
461
+
462
+ >>> s.struct.explode()
463
+ version project
464
+ 0 1 pandas
465
+ 1 2 pandas
466
+ 2 1 numpy
467
+ """
468
+ from pandas import concat
469
+
470
+ pa_type = self._pa_array.type
471
+ return concat(
472
+ [self.field(i) for i in range(pa_type.num_fields)], axis="columns"
473
+ )
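A compact usage sketch of the two accessors defined above, based on their docstrings; it assumes pyarrow >= 10.0.1, matching the module's version guard:

    import pandas as pd
    import pyarrow as pa

    # List data: per-element length, indexing, and flattening.
    lists = pd.Series(
        [[1, 2, 3], [3]],
        dtype=pd.ArrowDtype(pa.list_(pa.int64())),
    )
    lists.list.len()      # lengths 3 and 1
    lists.list[0]         # first element of each list
    lists.list.flatten()  # 1, 2, 3, 3

    # Struct data: per-field dtypes, single-field extraction, full expansion.
    structs = pd.Series(
        [{"version": 1, "project": "pandas"}, {"version": 2, "project": "numpy"}],
        dtype=pd.ArrowDtype(
            pa.struct([("version", pa.int64()), ("project", pa.string())])
        ),
    )
    structs.struct.dtypes            # dtype of each child field
    structs.struct.field("project")  # one field as a Series
    structs.struct.explode()         # all fields as a DataFrame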
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py ADDED
@@ -0,0 +1,174 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from typing import TYPE_CHECKING
5
+
6
+ import pyarrow
7
+
8
+ from pandas.compat import pa_version_under14p1
9
+
10
+ from pandas.core.dtypes.dtypes import (
11
+ IntervalDtype,
12
+ PeriodDtype,
13
+ )
14
+
15
+ from pandas.core.arrays.interval import VALID_CLOSED
16
+
17
+ if TYPE_CHECKING:
18
+ from pandas._typing import IntervalClosedType
19
+
20
+
21
+ class ArrowPeriodType(pyarrow.ExtensionType):
22
+ def __init__(self, freq) -> None:
23
+ # attributes need to be set first before calling
24
+ # super init (as that calls serialize)
25
+ self._freq = freq
26
+ pyarrow.ExtensionType.__init__(self, pyarrow.int64(), "pandas.period")
27
+
28
+ @property
29
+ def freq(self):
30
+ return self._freq
31
+
32
+ def __arrow_ext_serialize__(self) -> bytes:
33
+ metadata = {"freq": self.freq}
34
+ return json.dumps(metadata).encode()
35
+
36
+ @classmethod
37
+ def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType:
38
+ metadata = json.loads(serialized.decode())
39
+ return ArrowPeriodType(metadata["freq"])
40
+
41
+ def __eq__(self, other):
42
+ if isinstance(other, pyarrow.BaseExtensionType):
43
+ return type(self) == type(other) and self.freq == other.freq
44
+ else:
45
+ return NotImplemented
46
+
47
+ def __ne__(self, other) -> bool:
48
+ return not self == other
49
+
50
+ def __hash__(self) -> int:
51
+ return hash((str(self), self.freq))
52
+
53
+ def to_pandas_dtype(self) -> PeriodDtype:
54
+ return PeriodDtype(freq=self.freq)
55
+
56
+
57
+ # register the type with a dummy instance
58
+ _period_type = ArrowPeriodType("D")
59
+ pyarrow.register_extension_type(_period_type)
60
+
61
+
62
+ class ArrowIntervalType(pyarrow.ExtensionType):
63
+ def __init__(self, subtype, closed: IntervalClosedType) -> None:
64
+ # attributes need to be set first before calling
65
+ # super init (as that calls serialize)
66
+ assert closed in VALID_CLOSED
67
+ self._closed: IntervalClosedType = closed
68
+ if not isinstance(subtype, pyarrow.DataType):
69
+ subtype = pyarrow.type_for_alias(str(subtype))
70
+ self._subtype = subtype
71
+
72
+ storage_type = pyarrow.struct([("left", subtype), ("right", subtype)])
73
+ pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval")
74
+
75
+ @property
76
+ def subtype(self):
77
+ return self._subtype
78
+
79
+ @property
80
+ def closed(self) -> IntervalClosedType:
81
+ return self._closed
82
+
83
+ def __arrow_ext_serialize__(self) -> bytes:
84
+ metadata = {"subtype": str(self.subtype), "closed": self.closed}
85
+ return json.dumps(metadata).encode()
86
+
87
+ @classmethod
88
+ def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType:
89
+ metadata = json.loads(serialized.decode())
90
+ subtype = pyarrow.type_for_alias(metadata["subtype"])
91
+ closed = metadata["closed"]
92
+ return ArrowIntervalType(subtype, closed)
93
+
94
+ def __eq__(self, other):
95
+ if isinstance(other, pyarrow.BaseExtensionType):
96
+ return (
97
+ type(self) == type(other)
98
+ and self.subtype == other.subtype
99
+ and self.closed == other.closed
100
+ )
101
+ else:
102
+ return NotImplemented
103
+
104
+ def __ne__(self, other) -> bool:
105
+ return not self == other
106
+
107
+ def __hash__(self) -> int:
108
+ return hash((str(self), str(self.subtype), self.closed))
109
+
110
+ def to_pandas_dtype(self) -> IntervalDtype:
111
+ return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed)
112
+
113
+
114
+ # register the type with a dummy instance
115
+ _interval_type = ArrowIntervalType(pyarrow.int64(), "left")
116
+ pyarrow.register_extension_type(_interval_type)
117
+
118
+
119
+ _ERROR_MSG = """\
120
+ Disallowed deserialization of 'arrow.py_extension_type':
121
+ storage_type = {storage_type}
122
+ serialized = {serialized}
123
+ pickle disassembly:\n{pickle_disassembly}
124
+
125
+ Reading of untrusted Parquet or Feather files with a PyExtensionType column
126
+ allows arbitrary code execution.
127
+ If you trust this file, you can enable reading the extension type by one of:
128
+
129
+ - upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)`
130
+ - install pyarrow-hotfix (`pip install pyarrow-hotfix`) and disable it by running
131
+ `import pyarrow_hotfix; pyarrow_hotfix.uninstall()`
132
+
133
+ We strongly recommend updating your Parquet/Feather files to use extension types
134
+ derived from `pyarrow.ExtensionType` instead, and register this type explicitly.
135
+ """
136
+
137
+
138
+ def patch_pyarrow():
139
+ # starting from pyarrow 14.0.1, it has its own mechanism
140
+ if not pa_version_under14p1:
141
+ return
142
+
143
+ # if https://github.com/pitrou/pyarrow-hotfix was installed and enabled
144
+ if getattr(pyarrow, "_hotfix_installed", False):
145
+ return
146
+
147
+ class ForbiddenExtensionType(pyarrow.ExtensionType):
148
+ def __arrow_ext_serialize__(self):
149
+ return b""
150
+
151
+ @classmethod
152
+ def __arrow_ext_deserialize__(cls, storage_type, serialized):
153
+ import io
154
+ import pickletools
155
+
156
+ out = io.StringIO()
157
+ pickletools.dis(serialized, out)
158
+ raise RuntimeError(
159
+ _ERROR_MSG.format(
160
+ storage_type=storage_type,
161
+ serialized=serialized,
162
+ pickle_disassembly=out.getvalue(),
163
+ )
164
+ )
165
+
166
+ pyarrow.unregister_extension_type("arrow.py_extension_type")
167
+ pyarrow.register_extension_type(
168
+ ForbiddenExtensionType(pyarrow.null(), "arrow.py_extension_type")
169
+ )
170
+
171
+ pyarrow._hotfix_installed = True
172
+
173
+
174
+ patch_pyarrow()
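A brief, illustrative sketch of the metadata round-trip these extension types implement, assuming pyarrow is installed:

    import pyarrow

    from pandas.core.arrays.arrow.extension_types import (
        ArrowIntervalType,
        ArrowPeriodType,
    )

    # Serialization stores only the metadata (freq, or subtype and closed),
    # and deserialization rebuilds an equal type from it.
    period = ArrowPeriodType("D")
    payload = period.__arrow_ext_serialize__()
    restored = ArrowPeriodType.__arrow_ext_deserialize__(pyarrow.int64(), payload)
    print(restored == period)          # True: equality compares the freq

    interval = ArrowIntervalType(pyarrow.int64(), "left")
    print(interval.storage_type)       # struct<left: int64, right: int64>
    print(interval.to_pandas_dtype())  # interval[int64, left]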
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py ADDED
@@ -0,0 +1,19 @@
+from pandas.core.arrays.sparse.accessor import (
+    SparseAccessor,
+    SparseFrameAccessor,
+)
+from pandas.core.arrays.sparse.array import (
+    BlockIndex,
+    IntIndex,
+    SparseArray,
+    make_sparse_index,
+)
+
+__all__ = [
+    "BlockIndex",
+    "IntIndex",
+    "make_sparse_index",
+    "SparseAccessor",
+    "SparseArray",
+    "SparseFrameAccessor",
+]
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (479 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc ADDED
Binary file (13.2 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc ADDED
Binary file (44.4 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc ADDED
Binary file (6.43 kB).
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py ADDED
@@ -0,0 +1,414 @@
1
+ """Sparse accessor"""
2
+ from __future__ import annotations
3
+
4
+ from typing import TYPE_CHECKING
5
+
6
+ import numpy as np
7
+
8
+ from pandas.compat._optional import import_optional_dependency
9
+
10
+ from pandas.core.dtypes.cast import find_common_type
11
+ from pandas.core.dtypes.dtypes import SparseDtype
12
+
13
+ from pandas.core.accessor import (
14
+ PandasDelegate,
15
+ delegate_names,
16
+ )
17
+ from pandas.core.arrays.sparse.array import SparseArray
18
+
19
+ if TYPE_CHECKING:
20
+ from pandas import (
21
+ DataFrame,
22
+ Series,
23
+ )
24
+
25
+
26
+ class BaseAccessor:
27
+ _validation_msg = "Can only use the '.sparse' accessor with Sparse data."
28
+
29
+ def __init__(self, data=None) -> None:
30
+ self._parent = data
31
+ self._validate(data)
32
+
33
+ def _validate(self, data):
34
+ raise NotImplementedError
35
+
36
+
37
+ @delegate_names(
38
+ SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
39
+ )
40
+ class SparseAccessor(BaseAccessor, PandasDelegate):
41
+ """
42
+ Accessor for sparse data from other sparse matrix data types.
43
+
44
+ Examples
45
+ --------
46
+ >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")
47
+ >>> ser.sparse.density
48
+ 0.6
49
+ >>> ser.sparse.sp_values
50
+ array([2, 2, 2])
51
+ """
52
+
53
+ def _validate(self, data):
54
+ if not isinstance(data.dtype, SparseDtype):
55
+ raise AttributeError(self._validation_msg)
56
+
57
+ def _delegate_property_get(self, name: str, *args, **kwargs):
58
+ return getattr(self._parent.array, name)
59
+
60
+ def _delegate_method(self, name: str, *args, **kwargs):
61
+ if name == "from_coo":
62
+ return self.from_coo(*args, **kwargs)
63
+ elif name == "to_coo":
64
+ return self.to_coo(*args, **kwargs)
65
+ else:
66
+ raise ValueError
67
+
68
+ @classmethod
69
+ def from_coo(cls, A, dense_index: bool = False) -> Series:
70
+ """
71
+ Create a Series with sparse values from a scipy.sparse.coo_matrix.
72
+
73
+ Parameters
74
+ ----------
75
+ A : scipy.sparse.coo_matrix
76
+ dense_index : bool, default False
77
+ If False (default), the index consists of only the
78
+ coords of the non-null entries of the original coo_matrix.
79
+ If True, the index consists of the full sorted
80
+ (row, col) coordinates of the coo_matrix.
81
+
82
+ Returns
83
+ -------
84
+ s : Series
85
+ A Series with sparse values.
86
+
87
+ Examples
88
+ --------
89
+ >>> from scipy import sparse
90
+
91
+ >>> A = sparse.coo_matrix(
92
+ ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
93
+ ... )
94
+ >>> A
95
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
96
+ with 3 stored elements in COOrdinate format>
97
+
98
+ >>> A.todense()
99
+ matrix([[0., 0., 1., 2.],
100
+ [3., 0., 0., 0.],
101
+ [0., 0., 0., 0.]])
102
+
103
+ >>> ss = pd.Series.sparse.from_coo(A)
104
+ >>> ss
105
+ 0 2 1.0
106
+ 3 2.0
107
+ 1 0 3.0
108
+ dtype: Sparse[float64, nan]
109
+ """
110
+ from pandas import Series
111
+ from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
112
+
113
+ result = coo_to_sparse_series(A, dense_index=dense_index)
114
+ result = Series(result.array, index=result.index, copy=False)
115
+
116
+ return result
117
+
118
+ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):
119
+ """
120
+ Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
121
+
122
+ Use row_levels and column_levels to determine the row and column
123
+ coordinates respectively. row_levels and column_levels are the names
124
+ (labels) or numbers of the levels. {row_levels, column_levels} must be
125
+ a partition of the MultiIndex level names (or numbers).
126
+
127
+ Parameters
128
+ ----------
129
+ row_levels : tuple/list
130
+ column_levels : tuple/list
131
+ sort_labels : bool, default False
132
+ Sort the row and column labels before forming the sparse matrix.
133
+ When `row_levels` and/or `column_levels` refer to a single level,
134
+ set to `True` for a faster execution.
135
+
136
+ Returns
137
+ -------
138
+ y : scipy.sparse.coo_matrix
139
+ rows : list (row labels)
140
+ columns : list (column labels)
141
+
142
+ Examples
143
+ --------
144
+ >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
145
+ >>> s.index = pd.MultiIndex.from_tuples(
146
+ ... [
147
+ ... (1, 2, "a", 0),
148
+ ... (1, 2, "a", 1),
149
+ ... (1, 1, "b", 0),
150
+ ... (1, 1, "b", 1),
151
+ ... (2, 1, "b", 0),
152
+ ... (2, 1, "b", 1)
153
+ ... ],
154
+ ... names=["A", "B", "C", "D"],
155
+ ... )
156
+ >>> s
157
+ A B C D
158
+ 1 2 a 0 3.0
159
+ 1 NaN
160
+ 1 b 0 1.0
161
+ 1 3.0
162
+ 2 1 b 0 NaN
163
+ 1 NaN
164
+ dtype: float64
165
+
166
+ >>> ss = s.astype("Sparse")
167
+ >>> ss
168
+ A B C D
169
+ 1 2 a 0 3.0
170
+ 1 NaN
171
+ 1 b 0 1.0
172
+ 1 3.0
173
+ 2 1 b 0 NaN
174
+ 1 NaN
175
+ dtype: Sparse[float64, nan]
176
+
177
+ >>> A, rows, columns = ss.sparse.to_coo(
178
+ ... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
179
+ ... )
180
+ >>> A
181
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
182
+ with 3 stored elements in COOrdinate format>
183
+ >>> A.todense()
184
+ matrix([[0., 0., 1., 3.],
185
+ [3., 0., 0., 0.],
186
+ [0., 0., 0., 0.]])
187
+
188
+ >>> rows
189
+ [(1, 1), (1, 2), (2, 1)]
190
+ >>> columns
191
+ [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
192
+ """
193
+ from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
194
+
195
+ A, rows, columns = sparse_series_to_coo(
196
+ self._parent, row_levels, column_levels, sort_labels=sort_labels
197
+ )
198
+ return A, rows, columns
199
+
200
+ def to_dense(self) -> Series:
201
+ """
202
+ Convert a Series from sparse values to dense.
203
+
204
+ Returns
205
+ -------
206
+ Series:
207
+ A Series with the same values, stored as a dense array.
208
+
209
+ Examples
210
+ --------
211
+ >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
212
+ >>> series
213
+ 0 0
214
+ 1 1
215
+ 2 0
216
+ dtype: Sparse[int64, 0]
217
+
218
+ >>> series.sparse.to_dense()
219
+ 0 0
220
+ 1 1
221
+ 2 0
222
+ dtype: int64
223
+ """
224
+ from pandas import Series
225
+
226
+ return Series(
227
+ self._parent.array.to_dense(),
228
+ index=self._parent.index,
229
+ name=self._parent.name,
230
+ copy=False,
231
+ )
232
+
233
+
234
+ class SparseFrameAccessor(BaseAccessor, PandasDelegate):
235
+ """
236
+ DataFrame accessor for sparse data.
237
+
238
+ Examples
239
+ --------
240
+ >>> df = pd.DataFrame({"a": [1, 2, 0, 0],
241
+ ... "b": [3, 0, 0, 4]}, dtype="Sparse[int]")
242
+ >>> df.sparse.density
243
+ 0.5
244
+ """
245
+
246
+ def _validate(self, data):
247
+ dtypes = data.dtypes
248
+ if not all(isinstance(t, SparseDtype) for t in dtypes):
249
+ raise AttributeError(self._validation_msg)
250
+
251
+ @classmethod
252
+ def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:
253
+ """
254
+ Create a new DataFrame from a scipy sparse matrix.
255
+
256
+ Parameters
257
+ ----------
258
+ data : scipy.sparse.spmatrix
259
+ Must be convertible to csc format.
260
+ index, columns : Index, optional
261
+ Row and column labels to use for the resulting DataFrame.
262
+ Defaults to a RangeIndex.
263
+
264
+ Returns
265
+ -------
266
+ DataFrame
267
+ Each column of the DataFrame is stored as a
268
+ :class:`arrays.SparseArray`.
269
+
270
+ Examples
271
+ --------
272
+ >>> import scipy.sparse
273
+ >>> mat = scipy.sparse.eye(3, dtype=float)
274
+ >>> pd.DataFrame.sparse.from_spmatrix(mat)
275
+ 0 1 2
276
+ 0 1.0 0 0
277
+ 1 0 1.0 0
278
+ 2 0 0 1.0
279
+ """
280
+ from pandas._libs.sparse import IntIndex
281
+
282
+ from pandas import DataFrame
283
+
284
+ data = data.tocsc()
285
+ index, columns = cls._prep_index(data, index, columns)
286
+ n_rows, n_columns = data.shape
287
+ # We need to make sure indices are sorted, as we create
288
+ # IntIndex with no input validation (i.e. check_integrity=False ).
289
+ # Indices may already be sorted in scipy in which case this adds
290
+ # a small overhead.
291
+ data.sort_indices()
292
+ indices = data.indices
293
+ indptr = data.indptr
294
+ array_data = data.data
295
+ dtype = SparseDtype(array_data.dtype, 0)
296
+ arrays = []
297
+ for i in range(n_columns):
298
+ sl = slice(indptr[i], indptr[i + 1])
299
+ idx = IntIndex(n_rows, indices[sl], check_integrity=False)
300
+ arr = SparseArray._simple_new(array_data[sl], idx, dtype)
301
+ arrays.append(arr)
302
+ return DataFrame._from_arrays(
303
+ arrays, columns=columns, index=index, verify_integrity=False
304
+ )
305
+
306
+ def to_dense(self) -> DataFrame:
307
+ """
308
+ Convert a DataFrame with sparse values to dense.
309
+
310
+ Returns
311
+ -------
312
+ DataFrame
313
+ A DataFrame with the same values stored as dense arrays.
314
+
315
+ Examples
316
+ --------
317
+ >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
318
+ >>> df.sparse.to_dense()
319
+ A
320
+ 0 0
321
+ 1 1
322
+ 2 0
323
+ """
324
+ from pandas import DataFrame
325
+
326
+ data = {k: v.array.to_dense() for k, v in self._parent.items()}
327
+ return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
328
+
329
+ def to_coo(self):
330
+ """
331
+ Return the contents of the frame as a sparse SciPy COO matrix.
332
+
333
+ Returns
334
+ -------
335
+ scipy.sparse.spmatrix
336
+ If the caller is heterogeneous and contains booleans or objects,
337
+ the result will be of dtype=object. See Notes.
338
+
339
+ Notes
340
+ -----
341
+ The dtype will be the lowest-common-denominator type (implicit
342
+ upcasting); that is to say if the dtypes (even of numeric types)
343
+ are mixed, the one that accommodates all will be chosen.
344
+
345
+ e.g. If the dtypes are float16 and float32, dtype will be upcast to
346
+ float32. By numpy.find_common_type convention, mixing int64 and
347
+ uint64 will result in a float64 dtype.
348
+
349
+ Examples
350
+ --------
351
+ >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
352
+ >>> df.sparse.to_coo()
353
+ <4x1 sparse matrix of type '<class 'numpy.int64'>'
354
+ with 2 stored elements in COOrdinate format>
355
+ """
356
+ import_optional_dependency("scipy")
357
+ from scipy.sparse import coo_matrix
358
+
359
+ dtype = find_common_type(self._parent.dtypes.to_list())
360
+ if isinstance(dtype, SparseDtype):
361
+ dtype = dtype.subtype
362
+
363
+ cols, rows, data = [], [], []
364
+ for col, (_, ser) in enumerate(self._parent.items()):
365
+ sp_arr = ser.array
366
+ if sp_arr.fill_value != 0:
367
+ raise ValueError("fill value must be 0 when converting to COO matrix")
368
+
369
+ row = sp_arr.sp_index.indices
370
+ cols.append(np.repeat(col, len(row)))
371
+ rows.append(row)
372
+ data.append(sp_arr.sp_values.astype(dtype, copy=False))
373
+
374
+ cols = np.concatenate(cols)
375
+ rows = np.concatenate(rows)
376
+ data = np.concatenate(data)
377
+ return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
378
+
379
+ @property
380
+ def density(self) -> float:
381
+ """
382
+ Ratio of non-sparse points to total (dense) data points.
383
+
384
+ Examples
385
+ --------
386
+ >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
387
+ >>> df.sparse.density
388
+ 0.5
389
+ """
390
+ tmp = np.mean([column.array.density for _, column in self._parent.items()])
391
+ return tmp
392
+
393
+ @staticmethod
394
+ def _prep_index(data, index, columns):
395
+ from pandas.core.indexes.api import (
396
+ default_index,
397
+ ensure_index,
398
+ )
399
+
400
+ N, K = data.shape
401
+ if index is None:
402
+ index = default_index(N)
403
+ else:
404
+ index = ensure_index(index)
405
+ if columns is None:
406
+ columns = default_index(K)
407
+ else:
408
+ columns = ensure_index(columns)
409
+
410
+ if len(columns) != K:
411
+ raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
412
+ if len(index) != N:
413
+ raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
414
+ return index, columns
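A short sketch of the DataFrame-level accessor added above, drawing on its docstrings and assuming scipy is installed:

    import pandas as pd
    import scipy.sparse

    # scipy sparse matrix -> sparse-backed DataFrame -> dense / COO again.
    mat = scipy.sparse.eye(3, dtype=float)
    df = pd.DataFrame.sparse.from_spmatrix(mat)

    df.sparse.density     # fraction of stored (non-fill) values, here 1/3
    df.sparse.to_dense()  # ordinary dense DataFrame
    df.sparse.to_coo()    # back to a COO matrix; fill value must be 0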
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py ADDED
@@ -0,0 +1,207 @@
1
+ """
2
+ Interaction with scipy.sparse matrices.
3
+
4
+ Currently only includes to_coo helpers.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from typing import TYPE_CHECKING
9
+
10
+ from pandas._libs import lib
11
+
12
+ from pandas.core.dtypes.missing import notna
13
+
14
+ from pandas.core.algorithms import factorize
15
+ from pandas.core.indexes.api import MultiIndex
16
+ from pandas.core.series import Series
17
+
18
+ if TYPE_CHECKING:
19
+ from collections.abc import Iterable
20
+
21
+ import numpy as np
22
+ import scipy.sparse
23
+
24
+ from pandas._typing import (
25
+ IndexLabel,
26
+ npt,
27
+ )
28
+
29
+
30
+ def _check_is_partition(parts: Iterable, whole: Iterable):
31
+ whole = set(whole)
32
+ parts = [set(x) for x in parts]
33
+ if set.intersection(*parts) != set():
34
+ raise ValueError("Is not a partition because intersection is not null.")
35
+ if set.union(*parts) != whole:
36
+ raise ValueError("Is not a partition because union is not the whole.")
37
+
38
+
39
+ def _levels_to_axis(
40
+ ss,
41
+ levels: tuple[int] | list[int],
42
+ valid_ilocs: npt.NDArray[np.intp],
43
+ sort_labels: bool = False,
44
+ ) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:
45
+ """
46
+ For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,
47
+ where `ax_coords` are the coordinates along one of the two axes of the
48
+ destination sparse matrix, and `ax_labels` are the labels from `ss`' Index
49
+ which correspond to these coordinates.
50
+
51
+ Parameters
52
+ ----------
53
+ ss : Series
54
+ levels : tuple/list
55
+ valid_ilocs : numpy.ndarray
56
+ Array of integer positions of valid values for the sparse matrix in ss.
57
+ sort_labels : bool, default False
58
+ Sort the axis labels before forming the sparse matrix. When `levels`
59
+ refers to a single level, set to True for a faster execution.
60
+
61
+ Returns
62
+ -------
63
+ ax_coords : numpy.ndarray (axis coordinates)
64
+ ax_labels : list (axis labels)
65
+ """
66
+ # Since the labels are sorted in `Index.levels`, when we wish to sort and
67
+ # there is only one level of the MultiIndex for this axis, the desired
68
+ # output can be obtained in the following simpler, more efficient way.
69
+ if sort_labels and len(levels) == 1:
70
+ ax_coords = ss.index.codes[levels[0]][valid_ilocs]
71
+ ax_labels = ss.index.levels[levels[0]]
72
+
73
+ else:
74
+ levels_values = lib.fast_zip(
75
+ [ss.index.get_level_values(lvl).to_numpy() for lvl in levels]
76
+ )
77
+ codes, ax_labels = factorize(levels_values, sort=sort_labels)
78
+ ax_coords = codes[valid_ilocs]
79
+
80
+ ax_labels = ax_labels.tolist()
81
+ return ax_coords, ax_labels
82
+
83
+
84
+ def _to_ijv(
85
+ ss,
86
+ row_levels: tuple[int] | list[int] = (0,),
87
+ column_levels: tuple[int] | list[int] = (1,),
88
+ sort_labels: bool = False,
89
+ ) -> tuple[
90
+ np.ndarray,
91
+ npt.NDArray[np.intp],
92
+ npt.NDArray[np.intp],
93
+ list[IndexLabel],
94
+ list[IndexLabel],
95
+ ]:
96
+ """
97
+ For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels,
98
+ jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo
99
+ constructor, and ilabels and jlabels are the row and column labels
100
+ respectively.
101
+
102
+ Parameters
103
+ ----------
104
+ ss : Series
105
+ row_levels : tuple/list
106
+ column_levels : tuple/list
107
+ sort_labels : bool, default False
108
+ Sort the row and column labels before forming the sparse matrix.
109
+ When `row_levels` and/or `column_levels` refer to a single level,
110
+ set to `True` for a faster execution.
111
+
112
+ Returns
113
+ -------
114
+ values : numpy.ndarray
115
+ Valid values to populate a sparse matrix, extracted from
116
+ ss.
117
+ i_coords : numpy.ndarray (row coordinates of the values)
118
+ j_coords : numpy.ndarray (column coordinates of the values)
119
+ i_labels : list (row labels)
120
+ j_labels : list (column labels)
121
+ """
122
+ # index and column levels must be a partition of the index
123
+ _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
124
+ # From the sparse Series, get the integer indices and data for valid sparse
125
+ # entries.
126
+ sp_vals = ss.array.sp_values
127
+ na_mask = notna(sp_vals)
128
+ values = sp_vals[na_mask]
129
+ valid_ilocs = ss.array.sp_index.indices[na_mask]
130
+
131
+ i_coords, i_labels = _levels_to_axis(
132
+ ss, row_levels, valid_ilocs, sort_labels=sort_labels
133
+ )
134
+
135
+ j_coords, j_labels = _levels_to_axis(
136
+ ss, column_levels, valid_ilocs, sort_labels=sort_labels
137
+ )
138
+
139
+ return values, i_coords, j_coords, i_labels, j_labels
140
+
141
+
142
+ def sparse_series_to_coo(
143
+ ss: Series,
144
+ row_levels: Iterable[int] = (0,),
145
+ column_levels: Iterable[int] = (1,),
146
+ sort_labels: bool = False,
147
+ ) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]:
148
+ """
149
+ Convert a sparse Series to a scipy.sparse.coo_matrix using index
150
+ levels row_levels, column_levels as the row and column
151
+ labels respectively. Returns the sparse_matrix, row and column labels.
152
+ """
153
+ import scipy.sparse
154
+
155
+ if ss.index.nlevels < 2:
156
+ raise ValueError("to_coo requires MultiIndex with nlevels >= 2.")
157
+ if not ss.index.is_unique:
158
+ raise ValueError(
159
+ "Duplicate index entries are not allowed in to_coo transformation."
160
+ )
161
+
162
+ # to keep things simple, only rely on integer indexing (not labels)
163
+ row_levels = [ss.index._get_level_number(x) for x in row_levels]
164
+ column_levels = [ss.index._get_level_number(x) for x in column_levels]
165
+
166
+ v, i, j, rows, columns = _to_ijv(
167
+ ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
168
+ )
169
+ sparse_matrix = scipy.sparse.coo_matrix(
170
+ (v, (i, j)), shape=(len(rows), len(columns))
171
+ )
172
+ return sparse_matrix, rows, columns
173
+
174
+
175
+ def coo_to_sparse_series(
176
+ A: scipy.sparse.coo_matrix, dense_index: bool = False
177
+ ) -> Series:
178
+ """
179
+ Convert a scipy.sparse.coo_matrix to a Series with type sparse.
180
+
181
+ Parameters
182
+ ----------
183
+ A : scipy.sparse.coo_matrix
184
+ dense_index : bool, default False
185
+
186
+ Returns
187
+ -------
188
+ Series
189
+
190
+ Raises
191
+ ------
192
+ TypeError if A is not a coo_matrix
193
+ """
194
+ from pandas import SparseDtype
195
+
196
+ try:
197
+ ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False)
198
+ except AttributeError as err:
199
+ raise TypeError(
200
+ f"Expected coo_matrix. Got {type(A).__name__} instead."
201
+ ) from err
202
+ ser = ser.sort_index()
203
+ ser = ser.astype(SparseDtype(ser.dtype))
204
+ if dense_index:
205
+ ind = MultiIndex.from_product([A.row, A.col])
206
+ ser = ser.reindex(ind)
207
+ return ser
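These helpers are reached through Series.sparse.to_coo and Series.sparse.from_coo; a minimal sketch with an illustrative two-level MultiIndex, assuming scipy is installed:

    import numpy as np
    import pandas as pd

    s = pd.Series(
        [3.0, np.nan, 1.0, 3.0],
        index=pd.MultiIndex.from_tuples(
            [(1, "a"), (1, "b"), (2, "a"), (2, "b")], names=["row", "col"]
        ),
    ).astype("Sparse[float64]")

    # Row coordinates come from the "row" level, columns from the "col" level.
    A, rows, columns = s.sparse.to_coo(row_levels=["row"], column_levels=["col"])
    round_tripped = pd.Series.sparse.from_coo(A)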
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc ADDED
Binary file (3.61 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc ADDED
Binary file (12.1 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc ADDED
Binary file (4.69 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc ADDED
Binary file (17.9 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc ADDED
Binary file (12.2 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.55 kB).
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/buffer.py ADDED
@@ -0,0 +1,136 @@
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+)
+
+from pandas.core.interchange.dataframe_protocol import (
+    Buffer,
+    DlpackDeviceType,
+)
+
+if TYPE_CHECKING:
+    import numpy as np
+    import pyarrow as pa
+
+
+class PandasBuffer(Buffer):
+    """
+    Data in the buffer is guaranteed to be contiguous in memory.
+    """
+
+    def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None:
+        """
+        Handle only regular columns (= numpy arrays) for now.
+        """
+        if x.strides[0] and not x.strides == (x.dtype.itemsize,):
+            # The protocol does not support strided buffers, so a copy is
+            # necessary. If that's not allowed, we need to raise an exception.
+            if allow_copy:
+                x = x.copy()
+            else:
+                raise RuntimeError(
+                    "Exports cannot be zero-copy in the case "
+                    "of a non-contiguous buffer"
+                )
+
+        # Store the numpy array in which the data resides as a private
+        # attribute, so we can use it to retrieve the public attributes
+        self._x = x
+
+    @property
+    def bufsize(self) -> int:
+        """
+        Buffer size in bytes.
+        """
+        return self._x.size * self._x.dtype.itemsize
+
+    @property
+    def ptr(self) -> int:
+        """
+        Pointer to start of the buffer as an integer.
+        """
+        return self._x.__array_interface__["data"][0]
+
+    def __dlpack__(self) -> Any:
+        """
+        Represent this structure as DLPack interface.
+        """
+        return self._x.__dlpack__()
+
+    def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
+        """
+        Device type and device ID for where the data in the buffer resides.
+        """
+        return (DlpackDeviceType.CPU, None)
+
+    def __repr__(self) -> str:
+        return (
+            "PandasBuffer("
+            + str(
+                {
+                    "bufsize": self.bufsize,
+                    "ptr": self.ptr,
+                    "device": self.__dlpack_device__()[0].name,
+                }
+            )
+            + ")"
+        )
+
+
+class PandasBufferPyarrow(Buffer):
+    """
+    Data in the buffer is guaranteed to be contiguous in memory.
+    """
+
+    def __init__(
+        self,
+        buffer: pa.Buffer,
+        *,
+        length: int,
+    ) -> None:
+        """
+        Handle pyarrow chunked arrays.
+        """
+        self._buffer = buffer
+        self._length = length
+
+    @property
+    def bufsize(self) -> int:
+        """
+        Buffer size in bytes.
+        """
+        return self._buffer.size
+
+    @property
+    def ptr(self) -> int:
+        """
+        Pointer to start of the buffer as an integer.
+        """
+        return self._buffer.address
+
+    def __dlpack__(self) -> Any:
+        """
+        Represent this structure as DLPack interface.
+        """
+        raise NotImplementedError()
+
+    def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
+        """
+        Device type and device ID for where the data in the buffer resides.
+        """
+        return (DlpackDeviceType.CPU, None)
+
+    def __repr__(self) -> str:
+        return (
+            "PandasBuffer[pyarrow]("
+            + str(
+                {
+                    "bufsize": self.bufsize,
+                    "ptr": self.ptr,
+                    "device": "CPU",
+                }
+            )
+            + ")"
+        )
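A small sketch of PandasBuffer wrapping a contiguous numpy array, as described by the class above; the noted values follow from a 5-element int64 array:

    import numpy as np

    from pandas.core.interchange.buffer import PandasBuffer

    arr = np.arange(5, dtype="int64")
    buf = PandasBuffer(arr)

    buf.bufsize              # 40 bytes (5 elements * 8 bytes each)
    buf.ptr                  # integer address of the underlying data
    buf.__dlpack_device__()  # (DlpackDeviceType.CPU, None)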
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/column.py ADDED
@@ -0,0 +1,461 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ )
7
+
8
+ import numpy as np
9
+
10
+ from pandas._libs.lib import infer_dtype
11
+ from pandas._libs.tslibs import iNaT
12
+ from pandas.errors import NoBufferPresent
13
+ from pandas.util._decorators import cache_readonly
14
+
15
+ from pandas.core.dtypes.dtypes import BaseMaskedDtype
16
+
17
+ import pandas as pd
18
+ from pandas import (
19
+ ArrowDtype,
20
+ DatetimeTZDtype,
21
+ )
22
+ from pandas.api.types import is_string_dtype
23
+ from pandas.core.interchange.buffer import (
24
+ PandasBuffer,
25
+ PandasBufferPyarrow,
26
+ )
27
+ from pandas.core.interchange.dataframe_protocol import (
28
+ Column,
29
+ ColumnBuffers,
30
+ ColumnNullType,
31
+ DtypeKind,
32
+ )
33
+ from pandas.core.interchange.utils import (
34
+ ArrowCTypes,
35
+ Endianness,
36
+ dtype_to_arrow_c_fmt,
37
+ )
38
+
39
+ if TYPE_CHECKING:
40
+ from pandas.core.interchange.dataframe_protocol import Buffer
41
+
42
+ _NP_KINDS = {
43
+ "i": DtypeKind.INT,
44
+ "u": DtypeKind.UINT,
45
+ "f": DtypeKind.FLOAT,
46
+ "b": DtypeKind.BOOL,
47
+ "U": DtypeKind.STRING,
48
+ "M": DtypeKind.DATETIME,
49
+ "m": DtypeKind.DATETIME,
50
+ }
51
+
52
+ _NULL_DESCRIPTION = {
53
+ DtypeKind.FLOAT: (ColumnNullType.USE_NAN, None),
54
+ DtypeKind.DATETIME: (ColumnNullType.USE_SENTINEL, iNaT),
55
+ DtypeKind.INT: (ColumnNullType.NON_NULLABLE, None),
56
+ DtypeKind.UINT: (ColumnNullType.NON_NULLABLE, None),
57
+ DtypeKind.BOOL: (ColumnNullType.NON_NULLABLE, None),
58
+ # Null values for categoricals are stored as `-1` sentinel values
59
+ # in the category data (e.g., `col.values.codes` is int8 np.ndarray)
60
+ DtypeKind.CATEGORICAL: (ColumnNullType.USE_SENTINEL, -1),
61
+ # follow Arrow in using 1 as valid value and 0 for missing/null value
62
+ DtypeKind.STRING: (ColumnNullType.USE_BYTEMASK, 0),
63
+ }
64
+
65
+ _NO_VALIDITY_BUFFER = {
66
+ ColumnNullType.NON_NULLABLE: "This column is non-nullable",
67
+ ColumnNullType.USE_NAN: "This column uses NaN as null",
68
+ ColumnNullType.USE_SENTINEL: "This column uses a sentinel value",
69
+ }
70
+
71
+
72
+ class PandasColumn(Column):
73
+ """
74
+ A column object, with only the methods and properties required by the
75
+ interchange protocol defined.
76
+ A column can contain one or more chunks. Each chunk can contain up to three
77
+ buffers - a data buffer, a mask buffer (depending on null representation),
78
+ and an offsets buffer (if variable-size binary; e.g., variable-length
79
+ strings).
80
+ Note: this Column object can only be produced by ``__dataframe__``, so
81
+ doesn't need its own version or ``__column__`` protocol.
82
+ """
83
+
84
+ def __init__(self, column: pd.Series, allow_copy: bool = True) -> None:
85
+ """
86
+ Note: doesn't deal with extension arrays yet, just assume a regular
87
+ Series/ndarray for now.
88
+ """
89
+ if isinstance(column, pd.DataFrame):
90
+ raise TypeError(
91
+ "Expected a Series, got a DataFrame. This likely happened "
92
+ "because you called __dataframe__ on a DataFrame which, "
93
+ "after converting column names to string, resulted in duplicated "
94
+ f"names: {column.columns}. Please rename these columns before "
95
+ "using the interchange protocol."
96
+ )
97
+ if not isinstance(column, pd.Series):
98
+ raise NotImplementedError(f"Columns of type {type(column)} not handled yet")
99
+
100
+ # Store the column as a private attribute
101
+ self._col = column
102
+ self._allow_copy = allow_copy
103
+
104
+ def size(self) -> int:
105
+ """
106
+ Size of the column, in elements.
107
+ """
108
+ return self._col.size
109
+
110
+ @property
111
+ def offset(self) -> int:
112
+ """
113
+ Offset of first element. Always zero.
114
+ """
115
+ # TODO: chunks are implemented now, probably this should return something
116
+ return 0
117
+
118
+ @cache_readonly
119
+ def dtype(self) -> tuple[DtypeKind, int, str, str]:
120
+ dtype = self._col.dtype
121
+
122
+ if isinstance(dtype, pd.CategoricalDtype):
123
+ codes = self._col.values.codes
124
+ (
125
+ _,
126
+ bitwidth,
127
+ c_arrow_dtype_f_str,
128
+ _,
129
+ ) = self._dtype_from_pandasdtype(codes.dtype)
130
+ return (
131
+ DtypeKind.CATEGORICAL,
132
+ bitwidth,
133
+ c_arrow_dtype_f_str,
134
+ Endianness.NATIVE,
135
+ )
136
+ elif is_string_dtype(dtype):
137
+ if infer_dtype(self._col) in ("string", "empty"):
138
+ return (
139
+ DtypeKind.STRING,
140
+ 8,
141
+ dtype_to_arrow_c_fmt(dtype),
142
+ Endianness.NATIVE,
143
+ )
144
+ raise NotImplementedError("Non-string object dtypes are not supported yet")
145
+ else:
146
+ return self._dtype_from_pandasdtype(dtype)
147
+
148
+ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]:
149
+ """
150
+ See `self.dtype` for details.
151
+ """
152
+ # Note: 'c' (complex) not handled yet (not in array spec v1).
153
+ # 'b', 'B' (bytes), 'S', 'a' (old-style string), 'V' (void) not handled
154
+ # datetime and timedelta both map to datetime (is timedelta handled?)
155
+
156
+ kind = _NP_KINDS.get(dtype.kind, None)
157
+ if kind is None:
158
+ # Not a NumPy dtype. Check if it's a categorical maybe
159
+ raise ValueError(f"Data type {dtype} not supported by interchange protocol")
160
+ if isinstance(dtype, ArrowDtype):
161
+ byteorder = dtype.numpy_dtype.byteorder
162
+ elif isinstance(dtype, DatetimeTZDtype):
163
+ byteorder = dtype.base.byteorder # type: ignore[union-attr]
164
+ elif isinstance(dtype, BaseMaskedDtype):
165
+ byteorder = dtype.numpy_dtype.byteorder
166
+ else:
167
+ byteorder = dtype.byteorder
168
+
169
+ if dtype == "bool[pyarrow]":
170
+ # return early to avoid the `* 8` below, as this is a bitmask
171
+ # rather than a bytemask
172
+ return (
173
+ kind,
174
+ dtype.itemsize, # pyright: ignore[reportGeneralTypeIssues]
175
+ ArrowCTypes.BOOL,
176
+ byteorder,
177
+ )
178
+
179
+ return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder
180
+
181
+ @property
182
+ def describe_categorical(self):
183
+ """
184
+ If the dtype is categorical, there are two options:
185
+ - There are only values in the data buffer.
186
+ - There is a separate non-categorical Column encoding for categorical values.
187
+
188
+ Raises TypeError if the dtype is not categorical
189
+
190
+ Content of returned dict:
191
+ - "is_ordered" : bool, whether the ordering of dictionary indices is
192
+ semantically meaningful.
193
+ - "is_dictionary" : bool, whether a dictionary-style mapping of
194
+ categorical values to other objects exists
195
+ - "categories" : Column representing the (implicit) mapping of indices to
196
+ category values (e.g. an array of cat1, cat2, ...).
197
+ None if not a dictionary-style categorical.
198
+ """
199
+ if not self.dtype[0] == DtypeKind.CATEGORICAL:
200
+ raise TypeError(
201
+ "describe_categorical only works on a column with categorical dtype!"
202
+ )
203
+
204
+ return {
205
+ "is_ordered": self._col.cat.ordered,
206
+ "is_dictionary": True,
207
+ "categories": PandasColumn(pd.Series(self._col.cat.categories)),
208
+ }
209
+
210
+ @property
211
+ def describe_null(self):
212
+ if isinstance(self._col.dtype, BaseMaskedDtype):
213
+ column_null_dtype = ColumnNullType.USE_BYTEMASK
214
+ null_value = 1
215
+ return column_null_dtype, null_value
216
+ if isinstance(self._col.dtype, ArrowDtype):
217
+ # We already rechunk (if necessary / allowed) upon initialization, so this
218
+ # is already single-chunk by the time we get here.
219
+ if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined]
220
+ return ColumnNullType.NON_NULLABLE, None
221
+ return ColumnNullType.USE_BITMASK, 0
222
+ kind = self.dtype[0]
223
+ try:
224
+ null, value = _NULL_DESCRIPTION[kind]
225
+ except KeyError:
226
+ raise NotImplementedError(f"Data type {kind} not yet supported")
227
+
228
+ return null, value
229
+
230
+ @cache_readonly
231
+ def null_count(self) -> int:
232
+ """
233
+ Number of null elements. Should always be known.
234
+ """
235
+ return self._col.isna().sum().item()
236
+
237
+ @property
238
+ def metadata(self) -> dict[str, pd.Index]:
239
+ """
240
+ Store specific metadata of the column.
241
+ """
242
+ return {"pandas.index": self._col.index}
243
+
244
+ def num_chunks(self) -> int:
245
+ """
246
+ Return the number of chunks the column consists of.
247
+ """
248
+ return 1
249
+
250
+ def get_chunks(self, n_chunks: int | None = None):
251
+ """
252
+ Return an iterator yielding the chunks.
253
+ See `DataFrame.get_chunks` for details on ``n_chunks``.
254
+ """
255
+ if n_chunks and n_chunks > 1:
256
+ size = len(self._col)
257
+ step = size // n_chunks
258
+ if size % n_chunks != 0:
259
+ step += 1
260
+ for start in range(0, step * n_chunks, step):
261
+ yield PandasColumn(
262
+ self._col.iloc[start : start + step], self._allow_copy
263
+ )
264
+ else:
265
+ yield self
266
+
267
+ def get_buffers(self) -> ColumnBuffers:
268
+ """
269
+ Return a dictionary containing the underlying buffers.
270
+ The returned dictionary has the following contents:
271
+ - "data": a two-element tuple whose first element is a buffer
272
+ containing the data and whose second element is the data
273
+ buffer's associated dtype.
274
+ - "validity": a two-element tuple whose first element is a buffer
275
+ containing mask values indicating missing data and
276
+ whose second element is the mask value buffer's
277
+ associated dtype. None if the null representation is
278
+ not a bit or byte mask.
279
+ - "offsets": a two-element tuple whose first element is a buffer
280
+ containing the offset values for variable-size binary
281
+ data (e.g., variable-length strings) and whose second
282
+ element is the offsets buffer's associated dtype. None
283
+ if the data buffer does not have an associated offsets
284
+ buffer.
285
+ """
286
+ buffers: ColumnBuffers = {
287
+ "data": self._get_data_buffer(),
288
+ "validity": None,
289
+ "offsets": None,
290
+ }
291
+
292
+ try:
293
+ buffers["validity"] = self._get_validity_buffer()
294
+ except NoBufferPresent:
295
+ pass
296
+
297
+ try:
298
+ buffers["offsets"] = self._get_offsets_buffer()
299
+ except NoBufferPresent:
300
+ pass
301
+
302
+ return buffers
303
+
304
+ def _get_data_buffer(
305
+ self,
306
+ ) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]:
307
+ """
308
+ Return the buffer containing the data and the buffer's associated dtype.
309
+ """
310
+ buffer: Buffer
311
+ if self.dtype[0] in (
312
+ DtypeKind.INT,
313
+ DtypeKind.UINT,
314
+ DtypeKind.FLOAT,
315
+ DtypeKind.BOOL,
316
+ DtypeKind.DATETIME,
317
+ ):
318
+ # self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make
319
+ # it longer than 4 characters
320
+ dtype = self.dtype
321
+ if self.dtype[0] == DtypeKind.DATETIME and len(self.dtype[2]) > 4:
322
+ np_arr = self._col.dt.tz_convert(None).to_numpy()
323
+ else:
324
+ arr = self._col.array
325
+ if isinstance(self._col.dtype, BaseMaskedDtype):
326
+ np_arr = arr._data # type: ignore[attr-defined]
327
+ elif isinstance(self._col.dtype, ArrowDtype):
328
+ # We already rechunk (if necessary / allowed) upon initialization,
329
+ # so this is already single-chunk by the time we get here.
330
+ arr = arr._pa_array.chunks[0] # type: ignore[attr-defined]
331
+ buffer = PandasBufferPyarrow(
332
+ arr.buffers()[1], # type: ignore[attr-defined]
333
+ length=len(arr),
334
+ )
335
+ return buffer, dtype
336
+ else:
337
+ np_arr = arr._ndarray # type: ignore[attr-defined]
338
+ buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy)
339
+ elif self.dtype[0] == DtypeKind.CATEGORICAL:
340
+ codes = self._col.values._codes
341
+ buffer = PandasBuffer(codes, allow_copy=self._allow_copy)
342
+ dtype = self._dtype_from_pandasdtype(codes.dtype)
343
+ elif self.dtype[0] == DtypeKind.STRING:
344
+ # Marshal the strings from a NumPy object array into a byte array
345
+ buf = self._col.to_numpy()
346
+ b = bytearray()
347
+
348
+ # TODO: this for-loop is slow; can be implemented in Cython/C/C++ later
349
+ for obj in buf:
350
+ if isinstance(obj, str):
351
+ b.extend(obj.encode(encoding="utf-8"))
352
+
353
+ # Convert the byte array to a Pandas "buffer" using
354
+ # a NumPy array as the backing store
355
+ buffer = PandasBuffer(np.frombuffer(b, dtype="uint8"))
356
+
357
+ # Define the dtype for the returned buffer
358
+ # TODO: this will need correcting
359
+ # https://github.com/pandas-dev/pandas/issues/54781
360
+ dtype = self.dtype
361
+ else:
362
+ raise NotImplementedError(f"Data type {self._col.dtype} not handled yet")
363
+
364
+ return buffer, dtype
365
+
366
+ def _get_validity_buffer(self) -> tuple[Buffer, Any] | None:
367
+ """
368
+ Return the buffer containing the mask values indicating missing data and
369
+ the buffer's associated dtype.
370
+ Raises NoBufferPresent if null representation is not a bit or byte mask.
371
+ """
372
+ null, invalid = self.describe_null
373
+ buffer: Buffer
374
+ if isinstance(self._col.dtype, ArrowDtype):
375
+ # We already rechunk (if necessary / allowed) upon initialization, so this
376
+ # is already single-chunk by the time we get here.
377
+ arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined]
378
+ dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE)
379
+ if arr.buffers()[0] is None:
380
+ return None
381
+ buffer = PandasBufferPyarrow(
382
+ arr.buffers()[0],
383
+ length=len(arr),
384
+ )
385
+ return buffer, dtype
386
+
387
+ if isinstance(self._col.dtype, BaseMaskedDtype):
388
+ mask = self._col.array._mask # type: ignore[attr-defined]
389
+ buffer = PandasBuffer(mask)
390
+ dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)
391
+ return buffer, dtype
392
+
393
+ if self.dtype[0] == DtypeKind.STRING:
394
+ # For now, use byte array as the mask.
395
+ # TODO: maybe store as bit array to save space?..
396
+ buf = self._col.to_numpy()
397
+
398
+ # Determine the encoding for valid values
399
+ valid = invalid == 0
400
+ invalid = not valid
401
+
402
+ mask = np.zeros(shape=(len(buf),), dtype=np.bool_)
403
+ for i, obj in enumerate(buf):
404
+ mask[i] = valid if isinstance(obj, str) else invalid
405
+
406
+ # Convert the mask array to a Pandas "buffer" using
407
+ # a NumPy array as the backing store
408
+ buffer = PandasBuffer(mask)
409
+
410
+ # Define the dtype of the returned buffer
411
+ dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)
412
+
413
+ return buffer, dtype
414
+
415
+ try:
416
+ msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask"
417
+ except KeyError:
418
+ # TODO: implement for other bit/byte masks?
419
+ raise NotImplementedError("See self.describe_null")
420
+
421
+ raise NoBufferPresent(msg)
422
+
423
+ def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]:
424
+ """
425
+ Return the buffer containing the offset values for variable-size binary
426
+ data (e.g., variable-length strings) and the buffer's associated dtype.
427
+ Raises NoBufferPresent if the data buffer does not have an associated
428
+ offsets buffer.
429
+ """
430
+ if self.dtype[0] == DtypeKind.STRING:
431
+ # For each string, we need to manually determine the next offset
432
+ values = self._col.to_numpy()
433
+ ptr = 0
434
+ offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)
435
+ for i, v in enumerate(values):
436
+ # For missing values (in this case, `np.nan` values)
437
+ # we don't increment the pointer
438
+ if isinstance(v, str):
439
+ b = v.encode(encoding="utf-8")
440
+ ptr += len(b)
441
+
442
+ offsets[i + 1] = ptr
443
+
444
+ # Convert the offsets to a Pandas "buffer" using
445
+ # the NumPy array as the backing store
446
+ buffer = PandasBuffer(offsets)
447
+
448
+ # Assemble the buffer dtype info
449
+ dtype = (
450
+ DtypeKind.INT,
451
+ 64,
452
+ ArrowCTypes.INT64,
453
+ Endianness.NATIVE,
454
+ ) # note: currently only support native endianness
455
+ else:
456
+ raise NoBufferPresent(
457
+ "This column has a fixed-length dtype so "
458
+ "it does not have an offsets buffer"
459
+ )
460
+
461
+ return buffer, dtype
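PandasColumn is the piece most consumers interact with: ``dtype`` reports a ``(kind, bit-width, format string, endianness)`` tuple and ``describe_null`` reports how missing values are encoded. A minimal sketch of what it returns, not part of the diff, assuming the enums defined in ``dataframe_protocol.py`` below:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "ints": pd.array([1, None, 3], dtype="Int64"),   # masked extension dtype
        "when": pd.to_datetime(["2024-01-01", None, "2024-01-03"]),
    }
)
xchg = df.__dataframe__()

ints = xchg.get_column_by_name("ints")
print(ints.dtype)          # (<DtypeKind.INT: 0>, 64, 'l', '=')
print(ints.describe_null)  # (<ColumnNullType.USE_BYTEMASK: 4>, 1)
print(ints.null_count)     # 1

when = xchg.get_column_by_name("when")
print(when.dtype[2])       # 'tsn:' -- Arrow C format string for ns timestamps
```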
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe.py ADDED
@@ -0,0 +1,113 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import abc
4
+ from typing import TYPE_CHECKING
5
+
6
+ from pandas.core.interchange.column import PandasColumn
7
+ from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg
8
+ from pandas.core.interchange.utils import maybe_rechunk
9
+
10
+ if TYPE_CHECKING:
11
+ from collections.abc import (
12
+ Iterable,
13
+ Sequence,
14
+ )
15
+
16
+ from pandas import (
17
+ DataFrame,
18
+ Index,
19
+ )
20
+
21
+
22
+ class PandasDataFrameXchg(DataFrameXchg):
23
+ """
24
+ A data frame class, with only the methods required by the interchange
25
+ protocol defined.
26
+ Instances of this (private) class are returned from
27
+ ``pd.DataFrame.__dataframe__`` as objects with the methods and
28
+ attributes defined on this class.
29
+ """
30
+
31
+ def __init__(self, df: DataFrame, allow_copy: bool = True) -> None:
32
+ """
33
+ Constructor - an instance of this (private) class is returned from
34
+ `pd.DataFrame.__dataframe__`.
35
+ """
36
+ self._df = df.rename(columns=str, copy=False)
37
+ self._allow_copy = allow_copy
38
+ for i, _col in enumerate(self._df.columns):
39
+ rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy)
40
+ if rechunked is not None:
41
+ self._df.isetitem(i, rechunked)
42
+
43
+ def __dataframe__(
44
+ self, nan_as_null: bool = False, allow_copy: bool = True
45
+ ) -> PandasDataFrameXchg:
46
+ # `nan_as_null` can be removed here once it's removed from
47
+ # Dataframe.__dataframe__
48
+ return PandasDataFrameXchg(self._df, allow_copy)
49
+
50
+ @property
51
+ def metadata(self) -> dict[str, Index]:
52
+ # `index` isn't a regular column, and the protocol doesn't support row
53
+ # labels - so we export it as Pandas-specific metadata here.
54
+ return {"pandas.index": self._df.index}
55
+
56
+ def num_columns(self) -> int:
57
+ return len(self._df.columns)
58
+
59
+ def num_rows(self) -> int:
60
+ return len(self._df)
61
+
62
+ def num_chunks(self) -> int:
63
+ return 1
64
+
65
+ def column_names(self) -> Index:
66
+ return self._df.columns
67
+
68
+ def get_column(self, i: int) -> PandasColumn:
69
+ return PandasColumn(self._df.iloc[:, i], allow_copy=self._allow_copy)
70
+
71
+ def get_column_by_name(self, name: str) -> PandasColumn:
72
+ return PandasColumn(self._df[name], allow_copy=self._allow_copy)
73
+
74
+ def get_columns(self) -> list[PandasColumn]:
75
+ return [
76
+ PandasColumn(self._df[name], allow_copy=self._allow_copy)
77
+ for name in self._df.columns
78
+ ]
79
+
80
+ def select_columns(self, indices: Sequence[int]) -> PandasDataFrameXchg:
81
+ if not isinstance(indices, abc.Sequence):
82
+ raise ValueError("`indices` is not a sequence")
83
+ if not isinstance(indices, list):
84
+ indices = list(indices)
85
+
86
+ return PandasDataFrameXchg(
87
+ self._df.iloc[:, indices], allow_copy=self._allow_copy
88
+ )
89
+
90
+ def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: # type: ignore[override]
91
+ if not isinstance(names, abc.Sequence):
92
+ raise ValueError("`names` is not a sequence")
93
+ if not isinstance(names, list):
94
+ names = list(names)
95
+
96
+ return PandasDataFrameXchg(self._df.loc[:, names], allow_copy=self._allow_copy)
97
+
98
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[PandasDataFrameXchg]:
99
+ """
100
+ Return an iterator yielding the chunks.
101
+ """
102
+ if n_chunks and n_chunks > 1:
103
+ size = len(self._df)
104
+ step = size // n_chunks
105
+ if size % n_chunks != 0:
106
+ step += 1
107
+ for start in range(0, step * n_chunks, step):
108
+ yield PandasDataFrameXchg(
109
+ self._df.iloc[start : start + step, :],
110
+ allow_copy=self._allow_copy,
111
+ )
112
+ else:
113
+ yield self
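The frame-level wrapper above mostly delegates to PandasColumn; the only non-trivial logic is the row-wise splitting in ``get_chunks``, which rounds the step up when the row count is not divisible by ``n_chunks``. A small sketch of that behaviour, not part of the diff:

```python
import pandas as pd

xchg = pd.DataFrame({"x": range(10)}).__dataframe__()

print(xchg.num_chunks())  # 1 -- pandas itself stores a single chunk
print(xchg.num_rows())    # 10
print([chunk.num_rows() for chunk in xchg.get_chunks(4)])  # [3, 3, 3, 1]
```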
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/dataframe_protocol.py ADDED
@@ -0,0 +1,465 @@
1
+ """
2
+ A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ from abc import (
8
+ ABC,
9
+ abstractmethod,
10
+ )
11
+ import enum
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Any,
15
+ TypedDict,
16
+ )
17
+
18
+ if TYPE_CHECKING:
19
+ from collections.abc import (
20
+ Iterable,
21
+ Sequence,
22
+ )
23
+
24
+
25
+ class DlpackDeviceType(enum.IntEnum):
26
+ """Integer enum for device type codes matching DLPack."""
27
+
28
+ CPU = 1
29
+ CUDA = 2
30
+ CPU_PINNED = 3
31
+ OPENCL = 4
32
+ VULKAN = 7
33
+ METAL = 8
34
+ VPI = 9
35
+ ROCM = 10
36
+
37
+
38
+ class DtypeKind(enum.IntEnum):
39
+ """
40
+ Integer enum for data types.
41
+
42
+ Attributes
43
+ ----------
44
+ INT : int
45
+ Matches to signed integer data type.
46
+ UINT : int
47
+ Matches to unsigned integer data type.
48
+ FLOAT : int
49
+ Matches to floating point data type.
50
+ BOOL : int
51
+ Matches to boolean data type.
52
+ STRING : int
53
+ Matches to string data type (UTF-8 encoded).
54
+ DATETIME : int
55
+ Matches to datetime data type.
56
+ CATEGORICAL : int
57
+ Matches to categorical data type.
58
+ """
59
+
60
+ INT = 0
61
+ UINT = 1
62
+ FLOAT = 2
63
+ BOOL = 20
64
+ STRING = 21 # UTF-8
65
+ DATETIME = 22
66
+ CATEGORICAL = 23
67
+
68
+
69
+ class ColumnNullType(enum.IntEnum):
70
+ """
71
+ Integer enum for null type representation.
72
+
73
+ Attributes
74
+ ----------
75
+ NON_NULLABLE : int
76
+ Non-nullable column.
77
+ USE_NAN : int
78
+ Use explicit float NaN value.
79
+ USE_SENTINEL : int
80
+ Sentinel value besides NaN/NaT.
81
+ USE_BITMASK : int
82
+ The bit is set/unset representing a null on a certain position.
83
+ USE_BYTEMASK : int
84
+ The byte is set/unset representing a null on a certain position.
85
+ """
86
+
87
+ NON_NULLABLE = 0
88
+ USE_NAN = 1
89
+ USE_SENTINEL = 2
90
+ USE_BITMASK = 3
91
+ USE_BYTEMASK = 4
92
+
93
+
94
+ class ColumnBuffers(TypedDict):
95
+ # first element is a buffer containing the column data;
96
+ # second element is the data buffer's associated dtype
97
+ data: tuple[Buffer, Any]
98
+
99
+ # first element is a buffer containing mask values indicating missing data;
100
+ # second element is the mask value buffer's associated dtype.
101
+ # None if the null representation is not a bit or byte mask
102
+ validity: tuple[Buffer, Any] | None
103
+
104
+ # first element is a buffer containing the offset values for
105
+ # variable-size binary data (e.g., variable-length strings);
106
+ # second element is the offsets buffer's associated dtype.
107
+ # None if the data buffer does not have an associated offsets buffer
108
+ offsets: tuple[Buffer, Any] | None
109
+
110
+
111
+ class CategoricalDescription(TypedDict):
112
+ # whether the ordering of dictionary indices is semantically meaningful
113
+ is_ordered: bool
114
+ # whether a dictionary-style mapping of categorical values to other objects exists
115
+ is_dictionary: bool
116
+ # Python-level only (e.g. ``{int: str}``).
117
+ # None if not a dictionary-style categorical.
118
+ categories: Column | None
119
+
120
+
121
+ class Buffer(ABC):
122
+ """
123
+ Data in the buffer is guaranteed to be contiguous in memory.
124
+
125
+ Note that there is no dtype attribute present, a buffer can be thought of
126
+ as simply a block of memory. However, if the column that the buffer is
127
+ attached to has a dtype that's supported by DLPack and ``__dlpack__`` is
128
+ implemented, then that dtype information will be contained in the return
129
+ value from ``__dlpack__``.
130
+
131
+ This distinction is useful to support both (a) data exchange via DLPack on a
132
+ buffer and (b) dtypes like variable-length strings, which do not have a
133
+ fixed number of bytes per element.
134
+ """
135
+
136
+ @property
137
+ @abstractmethod
138
+ def bufsize(self) -> int:
139
+ """
140
+ Buffer size in bytes.
141
+ """
142
+
143
+ @property
144
+ @abstractmethod
145
+ def ptr(self) -> int:
146
+ """
147
+ Pointer to start of the buffer as an integer.
148
+ """
149
+
150
+ @abstractmethod
151
+ def __dlpack__(self):
152
+ """
153
+ Produce DLPack capsule (see array API standard).
154
+
155
+ Raises:
156
+
157
+ - TypeError : if the buffer contains unsupported dtypes.
158
+ - NotImplementedError : if DLPack support is not implemented
159
+
160
+ Useful to have to connect to array libraries. Support optional because
161
+ it's not completely trivial to implement for a Python-only library.
162
+ """
163
+ raise NotImplementedError("__dlpack__")
164
+
165
+ @abstractmethod
166
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
167
+ """
168
+ Device type and device ID for where the data in the buffer resides.
169
+ Uses device type codes matching DLPack.
170
+ Note: must be implemented even if ``__dlpack__`` is not.
171
+ """
172
+
173
+
174
+ class Column(ABC):
175
+ """
176
+ A column object, with only the methods and properties required by the
177
+ interchange protocol defined.
178
+
179
+ A column can contain one or more chunks. Each chunk can contain up to three
180
+ buffers - a data buffer, a mask buffer (depending on null representation),
181
+ and an offsets buffer (if variable-size binary; e.g., variable-length
182
+ strings).
183
+
184
+ TBD: Arrow has a separate "null" dtype, and has no separate mask concept.
185
+ Instead, it seems to use "children" for both columns with a bit mask,
186
+ and for nested dtypes. Unclear whether this is elegant or confusing.
187
+ This design requires checking the null representation explicitly.
188
+
189
+ The Arrow design requires checking:
190
+ 1. the ARROW_FLAG_NULLABLE (for sentinel values)
191
+ 2. if a column has two children, combined with one of those children
192
+ having a null dtype.
193
+
194
+ Making the mask concept explicit seems useful. One null dtype would
195
+ not be enough to cover both bit and byte masks, so that would mean
196
+ even more checking if we did it the Arrow way.
197
+
198
+ TBD: there's also the "chunk" concept here, which is implicit in Arrow as
199
+ multiple buffers per array (= column here). Semantically it may make
200
+ sense to have both: chunks were meant for example for lazy evaluation
201
+ of data which doesn't fit in memory, while multiple buffers per column
202
+ could also come from doing a selection operation on a single
203
+ contiguous buffer.
204
+
205
+ Given these concepts, one would expect chunks to be all of the same
206
+ size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows),
207
+ while multiple buffers could have data-dependent lengths. Not an issue
208
+ in pandas if one column is backed by a single NumPy array, but in
209
+ Arrow it seems possible.
210
+ Are multiple chunks *and* multiple buffers per column necessary for
211
+ the purposes of this interchange protocol, or must producers either
212
+ reuse the chunk concept for this or copy the data?
213
+
214
+ Note: this Column object can only be produced by ``__dataframe__``, so
215
+ doesn't need its own version or ``__column__`` protocol.
216
+ """
217
+
218
+ @abstractmethod
219
+ def size(self) -> int:
220
+ """
221
+ Size of the column, in elements.
222
+
223
+ Corresponds to DataFrame.num_rows() if column is a single chunk;
224
+ equal to size of this current chunk otherwise.
225
+ """
226
+
227
+ @property
228
+ @abstractmethod
229
+ def offset(self) -> int:
230
+ """
231
+ Offset of first element.
232
+
233
+ May be > 0 if using chunks; for example for a column with N chunks of
234
+ equal size M (only the last chunk may be shorter),
235
+ ``offset = n * M``, ``n = 0 .. N-1``.
236
+ """
237
+
238
+ @property
239
+ @abstractmethod
240
+ def dtype(self) -> tuple[DtypeKind, int, str, str]:
241
+ """
242
+ Dtype description as a tuple ``(kind, bit-width, format string, endianness)``.
243
+
244
+ Bit-width : the number of bits as an integer
245
+ Format string : data type description format string in Apache Arrow C
246
+ Data Interface format.
247
+ Endianness : currently only native endianness (``=``) is supported
248
+
249
+ Notes:
250
+ - Kind specifiers are aligned with DLPack where possible (hence the
251
+ jump to 20, leave enough room for future extension)
252
+ - Masks must be specified as boolean with either bit width 1 (for bit
253
+ masks) or 8 (for byte masks).
254
+ - Dtype width in bits was preferred over bytes
255
+ - Endianness isn't too useful, but included now in case in the future
256
+ we need to support non-native endianness
257
+ - Went with Apache Arrow format strings over NumPy format strings
258
+ because they're more complete from a dataframe perspective
259
+ - Format strings are mostly useful for datetime specification, and
260
+ for categoricals.
261
+ - For categoricals, the format string describes the type of the
262
+ categorical in the data buffer. In case of a separate encoding of
263
+ the categorical (e.g. an integer to string mapping), this can
264
+ be derived from ``self.describe_categorical``.
265
+ - Data types not included: complex, Arrow-style null, binary, decimal,
266
+ and nested (list, struct, map, union) dtypes.
267
+ """
268
+
269
+ @property
270
+ @abstractmethod
271
+ def describe_categorical(self) -> CategoricalDescription:
272
+ """
273
+ If the dtype is categorical, there are two options:
274
+ - There are only values in the data buffer.
275
+ - There is a separate non-categorical Column encoding for categorical values.
276
+
277
+ Raises TypeError if the dtype is not categorical
278
+
279
+ Returns the dictionary with description on how to interpret the data buffer:
280
+ - "is_ordered" : bool, whether the ordering of dictionary indices is
281
+ semantically meaningful.
282
+ - "is_dictionary" : bool, whether a mapping of
283
+ categorical values to other objects exists
284
+ - "categories" : Column representing the (implicit) mapping of indices to
285
+ category values (e.g. an array of cat1, cat2, ...).
286
+ None if not a dictionary-style categorical.
287
+
288
+ TBD: are there any other in-memory representations that are needed?
289
+ """
290
+
291
+ @property
292
+ @abstractmethod
293
+ def describe_null(self) -> tuple[ColumnNullType, Any]:
294
+ """
295
+ Return the missing value (or "null") representation the column dtype
296
+ uses, as a tuple ``(kind, value)``.
297
+
298
+ Value : if kind is "sentinel value", the actual value. If kind is a bit
299
+ mask or a byte mask, the value (0 or 1) indicating a missing value. None
300
+ otherwise.
301
+ """
302
+
303
+ @property
304
+ @abstractmethod
305
+ def null_count(self) -> int | None:
306
+ """
307
+ Number of null elements, if known.
308
+
309
+ Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
310
+ """
311
+
312
+ @property
313
+ @abstractmethod
314
+ def metadata(self) -> dict[str, Any]:
315
+ """
316
+ The metadata for the column. See `DataFrame.metadata` for more details.
317
+ """
318
+
319
+ @abstractmethod
320
+ def num_chunks(self) -> int:
321
+ """
322
+ Return the number of chunks the column consists of.
323
+ """
324
+
325
+ @abstractmethod
326
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
327
+ """
328
+ Return an iterator yielding the chunks.
329
+
330
+ See `DataFrame.get_chunks` for details on ``n_chunks``.
331
+ """
332
+
333
+ @abstractmethod
334
+ def get_buffers(self) -> ColumnBuffers:
335
+ """
336
+ Return a dictionary containing the underlying buffers.
337
+
338
+ The returned dictionary has the following contents:
339
+
340
+ - "data": a two-element tuple whose first element is a buffer
341
+ containing the data and whose second element is the data
342
+ buffer's associated dtype.
343
+ - "validity": a two-element tuple whose first element is a buffer
344
+ containing mask values indicating missing data and
345
+ whose second element is the mask value buffer's
346
+ associated dtype. None if the null representation is
347
+ not a bit or byte mask.
348
+ - "offsets": a two-element tuple whose first element is a buffer
349
+ containing the offset values for variable-size binary
350
+ data (e.g., variable-length strings) and whose second
351
+ element is the offsets buffer's associated dtype. None
352
+ if the data buffer does not have an associated offsets
353
+ buffer.
354
+ """
355
+
356
+
357
+ # def get_children(self) -> Iterable[Column]:
358
+ # """
359
+ # Children columns underneath the column, each object in this iterator
360
+ # must adhere to the column specification.
361
+ # """
362
+ # pass
363
+
364
+
365
+ class DataFrame(ABC):
366
+ """
367
+ A data frame class, with only the methods required by the interchange
368
+ protocol defined.
369
+
370
+ A "data frame" represents an ordered collection of named columns.
371
+ A column's "name" must be a unique string.
372
+ Columns may be accessed by name or by position.
373
+
374
+ This could be a public data frame class, or an object with the methods and
375
+ attributes defined on this DataFrame class could be returned from the
376
+ ``__dataframe__`` method of a public data frame class in a library adhering
377
+ to the dataframe interchange protocol specification.
378
+ """
379
+
380
+ version = 0 # version of the protocol
381
+
382
+ @abstractmethod
383
+ def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
384
+ """Construct a new interchange object, potentially changing the parameters."""
385
+
386
+ @property
387
+ @abstractmethod
388
+ def metadata(self) -> dict[str, Any]:
389
+ """
390
+ The metadata for the data frame, as a dictionary with string keys. The
391
+ contents of `metadata` may be anything; they are meant for a library
392
+ to store information that it needs to, e.g., roundtrip losslessly or
393
+ for two implementations to share data that is not (yet) part of the
394
+ interchange protocol specification. To avoid collisions with other
395
+ entries, please name the keys with the name of the library
396
+ followed by a period and the desired name, e.g., ``pandas.indexcol``.
397
+ """
398
+
399
+ @abstractmethod
400
+ def num_columns(self) -> int:
401
+ """
402
+ Return the number of columns in the DataFrame.
403
+ """
404
+
405
+ @abstractmethod
406
+ def num_rows(self) -> int | None:
407
+ # TODO: not happy with Optional, but need to flag it may be expensive
408
+ # why include it if it may be None - what do we expect consumers
409
+ # to do here?
410
+ """
411
+ Return the number of rows in the DataFrame, if available.
412
+ """
413
+
414
+ @abstractmethod
415
+ def num_chunks(self) -> int:
416
+ """
417
+ Return the number of chunks the DataFrame consists of.
418
+ """
419
+
420
+ @abstractmethod
421
+ def column_names(self) -> Iterable[str]:
422
+ """
423
+ Return an iterator yielding the column names.
424
+ """
425
+
426
+ @abstractmethod
427
+ def get_column(self, i: int) -> Column:
428
+ """
429
+ Return the column at the indicated position.
430
+ """
431
+
432
+ @abstractmethod
433
+ def get_column_by_name(self, name: str) -> Column:
434
+ """
435
+ Return the column whose name is the indicated name.
436
+ """
437
+
438
+ @abstractmethod
439
+ def get_columns(self) -> Iterable[Column]:
440
+ """
441
+ Return an iterator yielding the columns.
442
+ """
443
+
444
+ @abstractmethod
445
+ def select_columns(self, indices: Sequence[int]) -> DataFrame:
446
+ """
447
+ Create a new DataFrame by selecting a subset of columns by index.
448
+ """
449
+
450
+ @abstractmethod
451
+ def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
452
+ """
453
+ Create a new DataFrame by selecting a subset of columns by name.
454
+ """
455
+
456
+ @abstractmethod
457
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
458
+ """
459
+ Return an iterator yielding the chunks.
460
+
461
+ By default (None), yields the chunks that the data is stored as by the
462
+ producer. If given, ``n_chunks`` must be a multiple of
463
+ ``self.num_chunks()``, meaning the producer must subdivide each chunk
464
+ before yielding it.
465
+ """
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/from_dataframe.py ADDED
@@ -0,0 +1,526 @@
1
+ from __future__ import annotations
2
+
3
+ import ctypes
4
+ import re
5
+ from typing import Any
6
+
7
+ import numpy as np
8
+
9
+ from pandas.compat._optional import import_optional_dependency
10
+ from pandas.errors import SettingWithCopyError
11
+
12
+ import pandas as pd
13
+ from pandas.core.interchange.dataframe_protocol import (
14
+ Buffer,
15
+ Column,
16
+ ColumnNullType,
17
+ DataFrame as DataFrameXchg,
18
+ DtypeKind,
19
+ )
20
+ from pandas.core.interchange.utils import (
21
+ ArrowCTypes,
22
+ Endianness,
23
+ )
24
+
25
+ _NP_DTYPES: dict[DtypeKind, dict[int, Any]] = {
26
+ DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64},
27
+ DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64},
28
+ DtypeKind.FLOAT: {32: np.float32, 64: np.float64},
29
+ DtypeKind.BOOL: {1: bool, 8: bool},
30
+ }
31
+
32
+
33
+ def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame:
34
+ """
35
+ Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.
36
+
37
+ Parameters
38
+ ----------
39
+ df : DataFrameXchg
40
+ Object supporting the interchange protocol, i.e. `__dataframe__` method.
41
+ allow_copy : bool, default: True
42
+ Whether to allow copying the memory to perform the conversion
43
+ (if false then zero-copy approach is requested).
44
+
45
+ Returns
46
+ -------
47
+ pd.DataFrame
48
+
49
+ Examples
50
+ --------
51
+ >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
52
+ >>> interchange_object = df_not_necessarily_pandas.__dataframe__()
53
+ >>> interchange_object.column_names()
54
+ Index(['A', 'B'], dtype='object')
55
+ >>> df_pandas = (pd.api.interchange.from_dataframe
56
+ ... (interchange_object.select_columns_by_name(['A'])))
57
+ >>> df_pandas
58
+ A
59
+ 0 1
60
+ 1 2
61
+
62
+ These methods (``column_names``, ``select_columns_by_name``) should work
63
+ for any dataframe library which implements the interchange protocol.
64
+ """
65
+ if isinstance(df, pd.DataFrame):
66
+ return df
67
+
68
+ if not hasattr(df, "__dataframe__"):
69
+ raise ValueError("`df` does not support __dataframe__")
70
+
71
+ return _from_dataframe(
72
+ df.__dataframe__(allow_copy=allow_copy), allow_copy=allow_copy
73
+ )
74
+
75
+
76
+ def _from_dataframe(df: DataFrameXchg, allow_copy: bool = True):
77
+ """
78
+ Build a ``pd.DataFrame`` from the DataFrame interchange object.
79
+
80
+ Parameters
81
+ ----------
82
+ df : DataFrameXchg
83
+ Object supporting the interchange protocol, i.e. `__dataframe__` method.
84
+ allow_copy : bool, default: True
85
+ Whether to allow copying the memory to perform the conversion
86
+ (if false then zero-copy approach is requested).
87
+
88
+ Returns
89
+ -------
90
+ pd.DataFrame
91
+ """
92
+ pandas_dfs = []
93
+ for chunk in df.get_chunks():
94
+ pandas_df = protocol_df_chunk_to_pandas(chunk)
95
+ pandas_dfs.append(pandas_df)
96
+
97
+ if not allow_copy and len(pandas_dfs) > 1:
98
+ raise RuntimeError(
99
+ "To join chunks a copy is required which is forbidden by allow_copy=False"
100
+ )
101
+ if not pandas_dfs:
102
+ pandas_df = protocol_df_chunk_to_pandas(df)
103
+ elif len(pandas_dfs) == 1:
104
+ pandas_df = pandas_dfs[0]
105
+ else:
106
+ pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False)
107
+
108
+ index_obj = df.metadata.get("pandas.index", None)
109
+ if index_obj is not None:
110
+ pandas_df.index = index_obj
111
+
112
+ return pandas_df
113
+
114
+
115
+ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
116
+ """
117
+ Convert interchange protocol chunk to ``pd.DataFrame``.
118
+
119
+ Parameters
120
+ ----------
121
+ df : DataFrameXchg
122
+
123
+ Returns
124
+ -------
125
+ pd.DataFrame
126
+ """
127
+ # We need a dict of columns here, with each column being a NumPy array (at
128
+ # least for now, deal with non-NumPy dtypes later).
129
+ columns: dict[str, Any] = {}
130
+ buffers = [] # hold on to buffers, keeps memory alive
131
+ for name in df.column_names():
132
+ if not isinstance(name, str):
133
+ raise ValueError(f"Column {name} is not a string")
134
+ if name in columns:
135
+ raise ValueError(f"Column {name} is not unique")
136
+ col = df.get_column_by_name(name)
137
+ dtype = col.dtype[0]
138
+ if dtype in (
139
+ DtypeKind.INT,
140
+ DtypeKind.UINT,
141
+ DtypeKind.FLOAT,
142
+ DtypeKind.BOOL,
143
+ ):
144
+ columns[name], buf = primitive_column_to_ndarray(col)
145
+ elif dtype == DtypeKind.CATEGORICAL:
146
+ columns[name], buf = categorical_column_to_series(col)
147
+ elif dtype == DtypeKind.STRING:
148
+ columns[name], buf = string_column_to_ndarray(col)
149
+ elif dtype == DtypeKind.DATETIME:
150
+ columns[name], buf = datetime_column_to_ndarray(col)
151
+ else:
152
+ raise NotImplementedError(f"Data type {dtype} not handled yet")
153
+
154
+ buffers.append(buf)
155
+
156
+ pandas_df = pd.DataFrame(columns)
157
+ pandas_df.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"] = buffers
158
+ return pandas_df
159
+
160
+
161
+ def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
162
+ """
163
+ Convert a column holding one of the primitive dtypes to a NumPy array.
164
+
165
+ A primitive type is one of: int, uint, float, bool.
166
+
167
+ Parameters
168
+ ----------
169
+ col : Column
170
+
171
+ Returns
172
+ -------
173
+ tuple
174
+ Tuple of np.ndarray holding the data and the memory owner object
175
+ that keeps the memory alive.
176
+ """
177
+ buffers = col.get_buffers()
178
+
179
+ data_buff, data_dtype = buffers["data"]
180
+ data = buffer_to_ndarray(
181
+ data_buff, data_dtype, offset=col.offset, length=col.size()
182
+ )
183
+
184
+ data = set_nulls(data, col, buffers["validity"])
185
+ return data, buffers
186
+
187
+
188
+ def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:
189
+ """
190
+ Convert a column holding categorical data to a pandas Series.
191
+
192
+ Parameters
193
+ ----------
194
+ col : Column
195
+
196
+ Returns
197
+ -------
198
+ tuple
199
+ Tuple of pd.Series holding the data and the memory owner object
200
+ that keeps the memory alive.
201
+ """
202
+ categorical = col.describe_categorical
203
+
204
+ if not categorical["is_dictionary"]:
205
+ raise NotImplementedError("Non-dictionary categoricals not supported yet")
206
+
207
+ cat_column = categorical["categories"]
208
+ if hasattr(cat_column, "_col"):
209
+ # Item "Column" of "Optional[Column]" has no attribute "_col"
210
+ # Item "None" of "Optional[Column]" has no attribute "_col"
211
+ categories = np.array(cat_column._col) # type: ignore[union-attr]
212
+ else:
213
+ raise NotImplementedError(
214
+ "Interchanging categorical columns isn't supported yet, and our "
215
+ "fallback of using the `col._col` attribute (a ndarray) failed."
216
+ )
217
+ buffers = col.get_buffers()
218
+
219
+ codes_buff, codes_dtype = buffers["data"]
220
+ codes = buffer_to_ndarray(
221
+ codes_buff, codes_dtype, offset=col.offset, length=col.size()
222
+ )
223
+
224
+ # Doing modulo in order to not get ``IndexError`` for
225
+ # out-of-bounds sentinel values in `codes`
226
+ if len(categories) > 0:
227
+ values = categories[codes % len(categories)]
228
+ else:
229
+ values = codes
230
+
231
+ cat = pd.Categorical(
232
+ values, categories=categories, ordered=categorical["is_ordered"]
233
+ )
234
+ data = pd.Series(cat)
235
+
236
+ data = set_nulls(data, col, buffers["validity"])
237
+ return data, buffers
238
+
239
+
240
+ def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
241
+ """
242
+ Convert a column holding string data to a NumPy array.
243
+
244
+ Parameters
245
+ ----------
246
+ col : Column
247
+
248
+ Returns
249
+ -------
250
+ tuple
251
+ Tuple of np.ndarray holding the data and the memory owner object
252
+ that keeps the memory alive.
253
+ """
254
+ null_kind, sentinel_val = col.describe_null
255
+
256
+ if null_kind not in (
257
+ ColumnNullType.NON_NULLABLE,
258
+ ColumnNullType.USE_BITMASK,
259
+ ColumnNullType.USE_BYTEMASK,
260
+ ):
261
+ raise NotImplementedError(
262
+ f"{null_kind} null kind is not yet supported for string columns."
263
+ )
264
+
265
+ buffers = col.get_buffers()
266
+
267
+ assert buffers["offsets"], "String buffers must contain offsets"
268
+ # Retrieve the data buffer containing the UTF-8 code units
269
+ data_buff, _ = buffers["data"]
270
+ # We're going to reinterpret the buffer as uint8, so make sure we can do it safely
271
+ assert col.dtype[2] in (
272
+ ArrowCTypes.STRING,
273
+ ArrowCTypes.LARGE_STRING,
274
+ ) # format_str == utf-8
275
+ # Convert the buffers to NumPy arrays. In order to go from STRING to
276
+ # an equivalent ndarray, we claim that the buffer is uint8 (i.e., a byte array)
277
+ data_dtype = (
278
+ DtypeKind.UINT,
279
+ 8,
280
+ ArrowCTypes.UINT8,
281
+ Endianness.NATIVE,
282
+ )
283
+ # Specify zero offset as we don't want to chunk the string data
284
+ data = buffer_to_ndarray(data_buff, data_dtype, offset=0, length=data_buff.bufsize)
285
+
286
+ # Retrieve the offsets buffer containing the index offsets demarcating
287
+ # the beginning and the ending of each string
288
+ offset_buff, offset_dtype = buffers["offsets"]
289
+ # Offsets buffer contains start-stop positions of strings in the data buffer,
290
+ # meaning that it has more elements than in the data buffer, do `col.size() + 1`
291
+ # here to pass a proper offsets buffer size
292
+ offsets = buffer_to_ndarray(
293
+ offset_buff, offset_dtype, offset=col.offset, length=col.size() + 1
294
+ )
295
+
296
+ null_pos = None
297
+ if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK):
298
+ validity = buffers["validity"]
299
+ if validity is not None:
300
+ valid_buff, valid_dtype = validity
301
+ null_pos = buffer_to_ndarray(
302
+ valid_buff, valid_dtype, offset=col.offset, length=col.size()
303
+ )
304
+ if sentinel_val == 0:
305
+ null_pos = ~null_pos
306
+
307
+ # Assemble the strings from the code units
308
+ str_list: list[None | float | str] = [None] * col.size()
309
+ for i in range(col.size()):
310
+ # Check for missing values
311
+ if null_pos is not None and null_pos[i]:
312
+ str_list[i] = np.nan
313
+ continue
314
+
315
+ # Extract a range of code units
316
+ units = data[offsets[i] : offsets[i + 1]]
317
+
318
+ # Convert the list of code units to bytes
319
+ str_bytes = bytes(units)
320
+
321
+ # Create the string
322
+ string = str_bytes.decode(encoding="utf-8")
323
+
324
+ # Add to our list of strings
325
+ str_list[i] = string
326
+
327
+ # Convert the string list to a NumPy array
328
+ return np.asarray(str_list, dtype="object"), buffers
329
+
330
+
331
+ def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray:
332
+ """Parse datetime `format_str` to interpret the `data`."""
333
+ # timestamp 'ts{unit}:tz'
334
+ timestamp_meta = re.match(r"ts([smun]):(.*)", format_str)
335
+ if timestamp_meta:
336
+ unit, tz = timestamp_meta.group(1), timestamp_meta.group(2)
337
+ if unit != "s":
338
+ # the format string describes only a first letter of the unit, so
339
+ # add one extra letter to convert the unit to numpy-style:
340
+ # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns'
341
+ unit += "s"
342
+ data = data.astype(f"datetime64[{unit}]")
343
+ if tz != "":
344
+ data = pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(tz)
345
+ return data
346
+
347
+ # date 'td{Days/Ms}'
348
+ date_meta = re.match(r"td([Dm])", format_str)
349
+ if date_meta:
350
+ unit = date_meta.group(1)
351
+ if unit == "D":
352
+ # NumPy doesn't support DAY unit, so converting days to seconds
353
+ # (converting to uint64 to avoid overflow)
354
+ data = (data.astype(np.uint64) * (24 * 60 * 60)).astype("datetime64[s]")
355
+ elif unit == "m":
356
+ data = data.astype("datetime64[ms]")
357
+ else:
358
+ raise NotImplementedError(f"Date unit is not supported: {unit}")
359
+ return data
360
+
361
+ raise NotImplementedError(f"DateTime kind is not supported: {format_str}")
362
+
363
+
364
+ def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any]:
365
+ """
366
+ Convert a column holding DateTime data to a NumPy array.
367
+
368
+ Parameters
369
+ ----------
370
+ col : Column
371
+
372
+ Returns
373
+ -------
374
+ tuple
375
+ Tuple of np.ndarray holding the data and the memory owner object
376
+ that keeps the memory alive.
377
+ """
378
+ buffers = col.get_buffers()
379
+
380
+ _, col_bit_width, format_str, _ = col.dtype
381
+ dbuf, _ = buffers["data"]
382
+ # Treat the buffer as integer data giving the number of units elapsed since 1970-01-01
383
+
384
+ data = buffer_to_ndarray(
385
+ dbuf,
386
+ (
387
+ DtypeKind.INT,
388
+ col_bit_width,
389
+ getattr(ArrowCTypes, f"INT{col_bit_width}"),
390
+ Endianness.NATIVE,
391
+ ),
392
+ offset=col.offset,
393
+ length=col.size(),
394
+ )
395
+
396
+ data = parse_datetime_format_str(format_str, data) # type: ignore[assignment]
397
+ data = set_nulls(data, col, buffers["validity"])
398
+ return data, buffers
399
+
400
+
401
+ def buffer_to_ndarray(
402
+ buffer: Buffer,
403
+ dtype: tuple[DtypeKind, int, str, str],
404
+ *,
405
+ length: int,
406
+ offset: int = 0,
407
+ ) -> np.ndarray:
408
+ """
409
+ Build a NumPy array from the passed buffer.
410
+
411
+ Parameters
412
+ ----------
413
+ buffer : Buffer
414
+ Buffer to build a NumPy array from.
415
+ dtype : tuple
416
+ Data type of the buffer conforming protocol dtypes format.
417
+ offset : int, default: 0
418
+ Number of elements to offset from the start of the buffer.
419
+ length : int
420
+ Number of elements in the buffer to read (for a bit-mask buffer, this is
421
+ the number of bits to read).
422
+
423
+ Returns
424
+ -------
425
+ np.ndarray
426
+
427
+ Notes
428
+ -----
429
+ The returned array doesn't own the memory. The caller of this function is
430
+ responsible for keeping the memory owner object alive as long as
431
+ the returned NumPy array is being used.
432
+ """
433
+ kind, bit_width, _, _ = dtype
434
+
435
+ column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None)
436
+ if column_dtype is None:
437
+ raise NotImplementedError(f"Conversion for {dtype} is not yet supported.")
438
+
439
+ # TODO: No DLPack yet, so need to construct a new ndarray from the data pointer
440
+ # and size in the buffer plus the dtype on the column. Use DLPack as NumPy supports
441
+ # it since https://github.com/numpy/numpy/pull/19083
442
+ ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype)
443
+
444
+ if bit_width == 1:
445
+ assert length is not None, "`length` must be specified for a bit-mask buffer."
446
+ pa = import_optional_dependency("pyarrow")
447
+ arr = pa.BooleanArray.from_buffers(
448
+ pa.bool_(),
449
+ length,
450
+ [None, pa.foreign_buffer(buffer.ptr, length)],
451
+ offset=offset,
452
+ )
453
+ return np.asarray(arr)
454
+ else:
455
+ data_pointer = ctypes.cast(
456
+ buffer.ptr + (offset * bit_width // 8), ctypes.POINTER(ctypes_type)
457
+ )
458
+ if length > 0:
459
+ return np.ctypeslib.as_array(data_pointer, shape=(length,))
460
+ return np.array([], dtype=ctypes_type)
461
+
462
+
463
+ def set_nulls(
464
+ data: np.ndarray | pd.Series,
465
+ col: Column,
466
+ validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,
467
+ allow_modify_inplace: bool = True,
468
+ ):
469
+ """
470
+ Set null values for the data according to the column null kind.
471
+
472
+ Parameters
473
+ ----------
474
+ data : np.ndarray or pd.Series
475
+ Data to set nulls in.
476
+ col : Column
477
+ Column object that describes the `data`.
478
+ validity : tuple(Buffer, dtype) or None
479
+ The return value of ``col.buffers()``. We do not access the ``col.buffers()``
480
+ here to not take the ownership of the memory of buffer objects.
481
+ allow_modify_inplace : bool, default: True
482
+ Whether to modify the `data` inplace when zero-copy is possible (True) or always
483
+ modify a copy of the `data` (False).
484
+
485
+ Returns
486
+ -------
487
+ np.ndarray or pd.Series
488
+ Data with the nulls being set.
489
+ """
490
+ if validity is None:
491
+ return data
492
+ null_kind, sentinel_val = col.describe_null
493
+ null_pos = None
494
+
495
+ if null_kind == ColumnNullType.USE_SENTINEL:
496
+ null_pos = pd.Series(data) == sentinel_val
497
+ elif null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK):
498
+ assert validity, "Expected to have a validity buffer for the mask"
499
+ valid_buff, valid_dtype = validity
500
+ null_pos = buffer_to_ndarray(
501
+ valid_buff, valid_dtype, offset=col.offset, length=col.size()
502
+ )
503
+ if sentinel_val == 0:
504
+ null_pos = ~null_pos
505
+ elif null_kind in (ColumnNullType.NON_NULLABLE, ColumnNullType.USE_NAN):
506
+ pass
507
+ else:
508
+ raise NotImplementedError(f"Null kind {null_kind} is not yet supported.")
509
+
510
+ if null_pos is not None and np.any(null_pos):
511
+ if not allow_modify_inplace:
512
+ data = data.copy()
513
+ try:
514
+ data[null_pos] = None
515
+ except TypeError:
516
+ # TypeError happens if the `data` dtype appears to be non-nullable
517
+ # in numpy notation (bool, int, uint). If this happens,
518
+ # cast the `data` to nullable float dtype.
519
+ data = data.astype(float)
520
+ data[null_pos] = None
521
+ except SettingWithCopyError:
522
+ # `SettingWithCopyError` may happen for datetime-like with missing values.
523
+ data = data.copy()
524
+ data[null_pos] = None
525
+
526
+ return data
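The public entry point for the conversion path implemented above is ``pandas.api.interchange.from_dataframe``. A short round-trip sketch, not part of the diff (the float upcast for column ``a`` comes from the pandas constructor, not from the protocol code):

```python
import pandas as pd
from pandas.api.interchange import from_dataframe

source = pd.DataFrame({"a": [1, 2, None], "b": ["x", "y", "z"]})
result = from_dataframe(source.__dataframe__())

print(result.dtypes)
# a    float64
# b     object
# dtype: object
```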
env-llmeval/lib/python3.10/site-packages/pandas/core/interchange/utils.py ADDED
@@ -0,0 +1,178 @@
1
+ """
2
+ Utility functions and objects for implementing the interchange API.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import typing
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs import lib
12
+
13
+ from pandas.core.dtypes.dtypes import (
14
+ ArrowDtype,
15
+ CategoricalDtype,
16
+ DatetimeTZDtype,
17
+ )
18
+
19
+ import pandas as pd
20
+
21
+ if typing.TYPE_CHECKING:
22
+ from pandas._typing import DtypeObj
23
+
24
+
25
+ # Maps str(pyarrow.DataType) -> C type format string
26
+ # Currently, no pyarrow API for this
27
+ PYARROW_CTYPES = {
28
+ "null": "n",
29
+ "bool": "b",
30
+ "uint8": "C",
31
+ "uint16": "S",
32
+ "uint32": "I",
33
+ "uint64": "L",
34
+ "int8": "c",
35
+ "int16": "S",
36
+ "int32": "i",
37
+ "int64": "l",
38
+ "halffloat": "e", # float16
39
+ "float": "f", # float32
40
+ "double": "g", # float64
41
+ "string": "u",
42
+ "large_string": "U",
43
+ "binary": "z",
44
+ "time32[s]": "tts",
45
+ "time32[ms]": "ttm",
46
+ "time64[us]": "ttu",
47
+ "time64[ns]": "ttn",
48
+ "date32[day]": "tdD",
49
+ "date64[ms]": "tdm",
50
+ "timestamp[s]": "tss:",
51
+ "timestamp[ms]": "tsm:",
52
+ "timestamp[us]": "tsu:",
53
+ "timestamp[ns]": "tsn:",
54
+ "duration[s]": "tDs",
55
+ "duration[ms]": "tDm",
56
+ "duration[us]": "tDu",
57
+ "duration[ns]": "tDn",
58
+ }
59
+
60
+
61
+ class ArrowCTypes:
62
+ """
63
+ Enum for Apache Arrow C type format strings.
64
+
65
+ The Arrow C data interface:
66
+ https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings
67
+ """
68
+
69
+ NULL = "n"
70
+ BOOL = "b"
71
+ INT8 = "c"
72
+ UINT8 = "C"
73
+ INT16 = "s"
74
+ UINT16 = "S"
75
+ INT32 = "i"
76
+ UINT32 = "I"
77
+ INT64 = "l"
78
+ UINT64 = "L"
79
+ FLOAT16 = "e"
80
+ FLOAT32 = "f"
81
+ FLOAT64 = "g"
82
+ STRING = "u" # utf-8
83
+ LARGE_STRING = "U" # utf-8
84
+ DATE32 = "tdD"
85
+ DATE64 = "tdm"
86
+ # Resolution:
87
+ # - seconds -> 's'
88
+ # - milliseconds -> 'm'
89
+ # - microseconds -> 'u'
90
+ # - nanoseconds -> 'n'
91
+ TIMESTAMP = "ts{resolution}:{tz}"
92
+ TIME = "tt{resolution}"
93
+
94
+
95
+ class Endianness:
96
+ """Enum indicating the byte-order of a data-type."""
97
+
98
+ LITTLE = "<"
99
+ BIG = ">"
100
+ NATIVE = "="
101
+ NA = "|"
102
+
103
+
104
+ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
105
+ """
106
+ Represent pandas `dtype` as a format string in Apache Arrow C notation.
107
+
108
+ Parameters
109
+ ----------
110
+ dtype : np.dtype
111
+ Datatype of pandas DataFrame to represent.
112
+
113
+ Returns
114
+ -------
115
+ str
116
+ Format string in Apache Arrow C notation of the given `dtype`.
117
+ """
118
+ if isinstance(dtype, CategoricalDtype):
119
+ return ArrowCTypes.INT64
120
+ elif dtype == np.dtype("O"):
121
+ return ArrowCTypes.STRING
122
+ elif isinstance(dtype, ArrowDtype):
123
+ import pyarrow as pa
124
+
125
+ pa_type = dtype.pyarrow_dtype
126
+ if pa.types.is_decimal(pa_type):
127
+ return f"d:{pa_type.precision},{pa_type.scale}"
128
+ elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None:
129
+ return f"ts{pa_type.unit[0]}:{pa_type.tz}"
130
+ format_str = PYARROW_CTYPES.get(str(pa_type), None)
131
+ if format_str is not None:
132
+ return format_str
133
+
134
+ format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
135
+ if format_str is not None:
136
+ return format_str
137
+
138
+ if lib.is_np_dtype(dtype, "M"):
139
+ # Selecting the first char of resolution string:
140
+ # dtype.str -> '<M8[ns]' -> 'n'
141
+ resolution = np.datetime_data(dtype)[0][0]
142
+ return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")
143
+
144
+ elif isinstance(dtype, DatetimeTZDtype):
145
+ return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)
146
+
147
+ elif isinstance(dtype, pd.BooleanDtype):
148
+ return ArrowCTypes.BOOL
149
+
150
+ raise NotImplementedError(
151
+ f"Conversion of {dtype} to Arrow C format string is not implemented."
152
+ )
153
+
154
+
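A usage sketch of dtype_to_arrow_c_fmt; note the module is pandas-internal, so the import path below is not public API:

import numpy as np
import pandas as pd
from pandas.core.interchange.utils import dtype_to_arrow_c_fmt

print(dtype_to_arrow_c_fmt(np.dtype("int64")))           # -> "l"
print(dtype_to_arrow_c_fmt(np.dtype("float32")))         # -> "f"
print(dtype_to_arrow_c_fmt(np.dtype("datetime64[ns]")))  # -> "tsn:"
print(dtype_to_arrow_c_fmt(pd.BooleanDtype()))           # -> "b"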
155
+ def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None:
156
+ """
157
+ Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary.
158
+
159
+ - Returns `None` if the input series is not backed by a multi-chunk pyarrow array
160
+ (and so doesn't need rechunking)
161
+ - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk
162
+ pyarrow array and `allow_copy` is `True`.
163
+ - Raises a `RuntimeError` if `allow_copy` is `False` and the input is
164
+ backed by a multi-chunk pyarrow array.
165
+ """
166
+ if not isinstance(series.dtype, pd.ArrowDtype):
167
+ return None
168
+ chunked_array = series.array._pa_array # type: ignore[attr-defined]
169
+ if len(chunked_array.chunks) == 1:
170
+ return None
171
+ if not allow_copy:
172
+ raise RuntimeError(
173
+ "Found multi-chunk pyarrow array, but `allow_copy` is False. "
174
+ "Please rechunk the array before calling this function, or set "
175
+ "`allow_copy=True`."
176
+ )
177
+ arr = chunked_array.combine_chunks()
178
+ return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index)
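A hedged usage sketch of maybe_rechunk (requires pyarrow; this is an internal helper used by the interchange code):

import pyarrow as pa
import pandas as pd
from pandas.core.interchange.utils import maybe_rechunk

s = pd.Series(pa.chunked_array([[1, 2], [3]]), dtype=pd.ArrowDtype(pa.int64()))
out = maybe_rechunk(s, allow_copy=True)
# `out` is None if `s` was already backed by a single chunk, otherwise a new
# Series backed by one combined chunk; with allow_copy=False it raises instead.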
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__init__.py ADDED
@@ -0,0 +1,93 @@
1
+ """
2
+ Arithmetic operations for PandasObjects
3
+
4
+ This is not a public API.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from pandas.core.ops.array_ops import (
9
+ arithmetic_op,
10
+ comp_method_OBJECT_ARRAY,
11
+ comparison_op,
12
+ fill_binop,
13
+ get_array_op,
14
+ logical_op,
15
+ maybe_prepare_scalar_for_op,
16
+ )
17
+ from pandas.core.ops.common import (
18
+ get_op_result_name,
19
+ unpack_zerodim_and_defer,
20
+ )
21
+ from pandas.core.ops.docstrings import make_flex_doc
22
+ from pandas.core.ops.invalid import invalid_comparison
23
+ from pandas.core.ops.mask_ops import (
24
+ kleene_and,
25
+ kleene_or,
26
+ kleene_xor,
27
+ )
28
+ from pandas.core.roperator import (
29
+ radd,
30
+ rand_,
31
+ rdiv,
32
+ rdivmod,
33
+ rfloordiv,
34
+ rmod,
35
+ rmul,
36
+ ror_,
37
+ rpow,
38
+ rsub,
39
+ rtruediv,
40
+ rxor,
41
+ )
42
+
43
+ # -----------------------------------------------------------------------------
44
+ # constants
45
+ ARITHMETIC_BINOPS: set[str] = {
46
+ "add",
47
+ "sub",
48
+ "mul",
49
+ "pow",
50
+ "mod",
51
+ "floordiv",
52
+ "truediv",
53
+ "divmod",
54
+ "radd",
55
+ "rsub",
56
+ "rmul",
57
+ "rpow",
58
+ "rmod",
59
+ "rfloordiv",
60
+ "rtruediv",
61
+ "rdivmod",
62
+ }
63
+
64
+
65
+ __all__ = [
66
+ "ARITHMETIC_BINOPS",
67
+ "arithmetic_op",
68
+ "comparison_op",
69
+ "comp_method_OBJECT_ARRAY",
70
+ "invalid_comparison",
71
+ "fill_binop",
72
+ "kleene_and",
73
+ "kleene_or",
74
+ "kleene_xor",
75
+ "logical_op",
76
+ "make_flex_doc",
77
+ "radd",
78
+ "rand_",
79
+ "rdiv",
80
+ "rdivmod",
81
+ "rfloordiv",
82
+ "rmod",
83
+ "rmul",
84
+ "ror_",
85
+ "rpow",
86
+ "rsub",
87
+ "rtruediv",
88
+ "rxor",
89
+ "unpack_zerodim_and_defer",
90
+ "get_op_result_name",
91
+ "maybe_prepare_scalar_for_op",
92
+ "get_array_op",
93
+ ]
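For illustration, one of the mask helpers re-exported here (internal API): a sketch of Kleene-logic "or" on a masked boolean array, where the mask marks NA positions:

import numpy as np
from pandas.core.ops import kleene_or

values = np.array([True, False, False])
mask = np.array([False, False, True])   # third entry is NA
result, result_mask = kleene_or(values, False, mask, None)
print(result, result_mask)              # [ True False False] [False False  True]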
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc ADDED
Binary file (11.5 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc ADDED
Binary file (3.4 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc ADDED
Binary file (15.2 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc ADDED
Binary file (1.81 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc ADDED
Binary file (3.86 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc ADDED
Binary file (4.08 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/array_ops.py ADDED
@@ -0,0 +1,604 @@
1
+ """
2
+ Functions for arithmetic and comparison operations on NumPy arrays and
3
+ ExtensionArrays.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import datetime
8
+ from functools import partial
9
+ import operator
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Any,
13
+ )
14
+ import warnings
15
+
16
+ import numpy as np
17
+
18
+ from pandas._libs import (
19
+ NaT,
20
+ Timedelta,
21
+ Timestamp,
22
+ lib,
23
+ ops as libops,
24
+ )
25
+ from pandas._libs.tslibs import (
26
+ BaseOffset,
27
+ get_supported_dtype,
28
+ is_supported_dtype,
29
+ is_unitless,
30
+ )
31
+ from pandas.util._exceptions import find_stack_level
32
+
33
+ from pandas.core.dtypes.cast import (
34
+ construct_1d_object_array_from_listlike,
35
+ find_common_type,
36
+ )
37
+ from pandas.core.dtypes.common import (
38
+ ensure_object,
39
+ is_bool_dtype,
40
+ is_list_like,
41
+ is_numeric_v_string_like,
42
+ is_object_dtype,
43
+ is_scalar,
44
+ )
45
+ from pandas.core.dtypes.generic import (
46
+ ABCExtensionArray,
47
+ ABCIndex,
48
+ ABCSeries,
49
+ )
50
+ from pandas.core.dtypes.missing import (
51
+ isna,
52
+ notna,
53
+ )
54
+
55
+ from pandas.core import roperator
56
+ from pandas.core.computation import expressions
57
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
58
+ from pandas.core.ops import missing
59
+ from pandas.core.ops.dispatch import should_extension_dispatch
60
+ from pandas.core.ops.invalid import invalid_comparison
61
+
62
+ if TYPE_CHECKING:
63
+ from pandas._typing import (
64
+ ArrayLike,
65
+ Shape,
66
+ )
67
+
68
+ # -----------------------------------------------------------------------------
69
+ # Masking NA values and fallbacks for operations numpy does not support
70
+
71
+
72
+ def fill_binop(left, right, fill_value):
73
+ """
74
+ If a non-None fill_value is given, replace null entries in left and right
75
+ with this value, but only in positions where _one_ of left/right is null,
76
+ not both.
77
+
78
+ Parameters
79
+ ----------
80
+ left : array-like
81
+ right : array-like
82
+ fill_value : object
83
+
84
+ Returns
85
+ -------
86
+ left : array-like
87
+ right : array-like
88
+
89
+ Notes
90
+ -----
91
+ Makes copies if fill_value is not None and NAs are present.
92
+ """
93
+ if fill_value is not None:
94
+ left_mask = isna(left)
95
+ right_mask = isna(right)
96
+
97
+ # one but not both
98
+ mask = left_mask ^ right_mask
99
+
100
+ if left_mask.any():
101
+ # Avoid making a copy if we can
102
+ left = left.copy()
103
+ left[left_mask & mask] = fill_value
104
+
105
+ if right_mask.any():
106
+ # Avoid making a copy if we can
107
+ right = right.copy()
108
+ right[right_mask & mask] = fill_value
109
+
110
+ return left, right
111
+
112
+
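A small sketch of fill_binop on two Series whose missing values do not overlap (fill_value=0 is just an example value):

import numpy as np
import pandas as pd
from pandas.core.ops import fill_binop

left = pd.Series([1.0, np.nan, 3.0])
right = pd.Series([np.nan, 2.0, 4.0])
left2, right2 = fill_binop(left, right, fill_value=0)
print(left2.tolist())   # [1.0, 0.0, 3.0]
print(right2.tolist())  # [0.0, 2.0, 4.0]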
113
+ def comp_method_OBJECT_ARRAY(op, x, y):
114
+ if isinstance(y, list):
115
+ # e.g. test_tuple_categories
116
+ y = construct_1d_object_array_from_listlike(y)
117
+
118
+ if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
119
+ if not is_object_dtype(y.dtype):
120
+ y = y.astype(np.object_)
121
+
122
+ if isinstance(y, (ABCSeries, ABCIndex)):
123
+ y = y._values
124
+
125
+ if x.shape != y.shape:
126
+ raise ValueError("Shapes must match", x.shape, y.shape)
127
+ result = libops.vec_compare(x.ravel(), y.ravel(), op)
128
+ else:
129
+ result = libops.scalar_compare(x.ravel(), y, op)
130
+ return result.reshape(x.shape)
131
+
132
+
133
+ def _masked_arith_op(x: np.ndarray, y, op):
134
+ """
135
+ If the given arithmetic operation fails, attempt it again on
136
+ only the non-null elements of the input array(s).
137
+
138
+ Parameters
139
+ ----------
140
+ x : np.ndarray
141
+ y : np.ndarray, Series, Index
142
+ op : binary operator
143
+ """
144
+ # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
145
+ # the logic valid for both Series and DataFrame ops.
146
+ xrav = x.ravel()
147
+
148
+ if isinstance(y, np.ndarray):
149
+ dtype = find_common_type([x.dtype, y.dtype])
150
+ result = np.empty(x.size, dtype=dtype)
151
+
152
+ if len(x) != len(y):
153
+ raise ValueError(x.shape, y.shape)
154
+ ymask = notna(y)
155
+
156
+ # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex
157
+ # we would get int64 dtype, see GH#19956
158
+ yrav = y.ravel()
159
+ mask = notna(xrav) & ymask.ravel()
160
+
161
+ # See GH#5284, GH#5035, GH#19448 for historical reference
162
+ if mask.any():
163
+ result[mask] = op(xrav[mask], yrav[mask])
164
+
165
+ else:
166
+ if not is_scalar(y):
167
+ raise TypeError(
168
+ f"Cannot broadcast np.ndarray with operand of type { type(y) }"
169
+ )
170
+
171
+ # mask is only meaningful for x
172
+ result = np.empty(x.size, dtype=x.dtype)
173
+ mask = notna(xrav)
174
+
175
+ # 1 ** np.nan is 1. So we have to unmask those.
176
+ if op is pow:
177
+ mask = np.where(x == 1, False, mask)
178
+ elif op is roperator.rpow:
179
+ mask = np.where(y == 1, False, mask)
180
+
181
+ if mask.any():
182
+ result[mask] = op(xrav[mask], y)
183
+
184
+ np.putmask(result, ~mask, np.nan)
185
+ result = result.reshape(x.shape) # 2D compat
186
+ return result
187
+
188
+
189
+ def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False):
190
+ """
191
+ Return the result of evaluating op on the passed in values.
192
+
193
+ If native types are not compatible, try coercion to object dtype.
194
+
195
+ Parameters
196
+ ----------
197
+ left : np.ndarray
198
+ right : np.ndarray or scalar
199
+ Excludes DataFrame, Series, Index, ExtensionArray.
200
+ is_cmp : bool, default False
201
+ If this a comparison operation.
202
+
203
+ Returns
204
+ -------
205
+ array-like
206
+
207
+ Raises
208
+ ------
209
+ TypeError : invalid operation
210
+ """
211
+ if isinstance(right, str):
212
+ # can never use numexpr
213
+ func = op
214
+ else:
215
+ func = partial(expressions.evaluate, op)
216
+
217
+ try:
218
+ result = func(left, right)
219
+ except TypeError:
220
+ if not is_cmp and (
221
+ left.dtype == object or getattr(right, "dtype", None) == object
222
+ ):
223
+ # For object dtype, fallback to a masked operation (only operating
224
+ # on the non-missing values)
225
+ # Don't do this for comparisons, as that will handle complex numbers
226
+ # incorrectly, see GH#32047
227
+ result = _masked_arith_op(left, right, op)
228
+ else:
229
+ raise
230
+
231
+ if is_cmp and (is_scalar(result) or result is NotImplemented):
232
+ # numpy returned a scalar instead of operating element-wise
233
+ # e.g. numeric array vs str
234
+ # TODO: can remove this after dropping some future numpy version?
235
+ return invalid_comparison(left, right, op)
236
+
237
+ return missing.dispatch_fill_zeros(op, left, right, result)
238
+
239
+
240
+ def arithmetic_op(left: ArrayLike, right: Any, op):
241
+ """
242
+ Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...
243
+
244
+ Note: the caller is responsible for ensuring that numpy warnings are
245
+ suppressed (with np.errstate(all="ignore")) if needed.
246
+
247
+ Parameters
248
+ ----------
249
+ left : np.ndarray or ExtensionArray
250
+ right : object
251
+ Cannot be a DataFrame or Index. Series is *not* excluded.
252
+ op : {operator.add, operator.sub, ...}
253
+ Or one of the reversed variants from roperator.
254
+
255
+ Returns
256
+ -------
257
+ ndarray or ExtensionArray
258
+ Or a 2-tuple of these in the case of divmod or rdivmod.
259
+ """
260
+ # NB: We assume that extract_array and ensure_wrapped_if_datetimelike
261
+ # have already been called on `left` and `right`,
262
+ # and `maybe_prepare_scalar_for_op` has already been called on `right`
263
+ # We need to special-case datetime64/timedelta64 dtypes (e.g. because numpy
264
+ # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390)
265
+
266
+ if (
267
+ should_extension_dispatch(left, right)
268
+ or isinstance(right, (Timedelta, BaseOffset, Timestamp))
269
+ or right is NaT
270
+ ):
271
+ # Timedelta/Timestamp and other custom scalars are included in the check
272
+ # because numexpr will fail on it, see GH#31457
273
+ res_values = op(left, right)
274
+ else:
275
+ # TODO we should handle EAs consistently and move this check before the if/else
276
+ # (https://github.com/pandas-dev/pandas/issues/41165)
277
+ # error: Argument 2 to "_bool_arith_check" has incompatible type
278
+ # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]"
279
+ _bool_arith_check(op, left, right) # type: ignore[arg-type]
280
+
281
+ # error: Argument 1 to "_na_arithmetic_op" has incompatible type
282
+ # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]"
283
+ res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type]
284
+
285
+ return res_values
286
+
287
+
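A bare-bones sketch of calling arithmetic_op directly (internal API; per the note above, the caller is responsible for suppressing numpy warnings):

import operator
import numpy as np
from pandas.core.ops import arithmetic_op

with np.errstate(all="ignore"):
    out = arithmetic_op(np.array([1.0, 2.0]), np.array([0.0, 2.0]), operator.truediv)
print(out)   # [inf  1.]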
288
+ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:
289
+ """
290
+ Evaluate a comparison operation `==`, `!=`, `>=`, `>`, `<=`, or `<`.
291
+
292
+ Note: the caller is responsible for ensuring that numpy warnings are
293
+ suppressed (with np.errstate(all="ignore")) if needed.
294
+
295
+ Parameters
296
+ ----------
297
+ left : np.ndarray or ExtensionArray
298
+ right : object
299
+ Cannot be a DataFrame, Series, or Index.
300
+ op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le}
301
+
302
+ Returns
303
+ -------
304
+ ndarray or ExtensionArray
305
+ """
306
+ # NB: We assume extract_array has already been called on left and right
307
+ lvalues = ensure_wrapped_if_datetimelike(left)
308
+ rvalues = ensure_wrapped_if_datetimelike(right)
309
+
310
+ rvalues = lib.item_from_zerodim(rvalues)
311
+ if isinstance(rvalues, list):
312
+ # We don't catch tuple here bc we may be comparing e.g. MultiIndex
313
+ # to a tuple that represents a single entry, see test_compare_tuple_strs
314
+ rvalues = np.asarray(rvalues)
315
+
316
+ if isinstance(rvalues, (np.ndarray, ABCExtensionArray)):
317
+ # TODO: make this treatment consistent across ops and classes.
318
+ # We are not catching all listlikes here (e.g. frozenset, tuple)
319
+ # The ambiguous case is object-dtype. See GH#27803
320
+ if len(lvalues) != len(rvalues):
321
+ raise ValueError(
322
+ "Lengths must match to compare", lvalues.shape, rvalues.shape
323
+ )
324
+
325
+ if should_extension_dispatch(lvalues, rvalues) or (
326
+ (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT)
327
+ and lvalues.dtype != object
328
+ ):
329
+ # Call the method on lvalues
330
+ res_values = op(lvalues, rvalues)
331
+
332
+ elif is_scalar(rvalues) and isna(rvalues): # TODO: but not pd.NA?
333
+ # numpy does not like comparisons vs None
334
+ if op is operator.ne:
335
+ res_values = np.ones(lvalues.shape, dtype=bool)
336
+ else:
337
+ res_values = np.zeros(lvalues.shape, dtype=bool)
338
+
339
+ elif is_numeric_v_string_like(lvalues, rvalues):
340
+ # GH#36377 going through the numexpr path would incorrectly raise
341
+ return invalid_comparison(lvalues, rvalues, op)
342
+
343
+ elif lvalues.dtype == object or isinstance(rvalues, str):
344
+ res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)
345
+
346
+ else:
347
+ res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True)
348
+
349
+ return res_values
350
+
351
+
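A sketch of the numeric-vs-string path noted above (GH#36377): the mismatched comparison yields all-False / all-True instead of raising:

import operator
import numpy as np
from pandas.core.ops import comparison_op

arr = np.array([1, 2, 3])
print(comparison_op(arr, "a", operator.eq))  # [False False False]
print(comparison_op(arr, "a", operator.ne))  # [ True  True  True]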
352
+ def na_logical_op(x: np.ndarray, y, op):
353
+ try:
354
+ # For exposition, write:
355
+ # yarr = isinstance(y, np.ndarray)
356
+ # yint = is_integer(y) or (yarr and y.dtype.kind == "i")
357
+ # ybool = is_bool(y) or (yarr and y.dtype.kind == "b")
358
+ # xint = x.dtype.kind == "i"
359
+ # xbool = x.dtype.kind == "b"
360
+ # Then Cases where this goes through without raising include:
361
+ # (xint or xbool) and (yint or ybool)
362
+ result = op(x, y)
363
+ except TypeError:
364
+ if isinstance(y, np.ndarray):
365
+ # bool-bool dtype operations should be OK, should not get here
366
+ assert not (x.dtype.kind == "b" and y.dtype.kind == "b")
367
+ x = ensure_object(x)
368
+ y = ensure_object(y)
369
+ result = libops.vec_binop(x.ravel(), y.ravel(), op)
370
+ else:
371
+ # let null fall thru
372
+ assert lib.is_scalar(y)
373
+ if not isna(y):
374
+ y = bool(y)
375
+ try:
376
+ result = libops.scalar_binop(x, y, op)
377
+ except (
378
+ TypeError,
379
+ ValueError,
380
+ AttributeError,
381
+ OverflowError,
382
+ NotImplementedError,
383
+ ) as err:
384
+ typ = type(y).__name__
385
+ raise TypeError(
386
+ f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array "
387
+ f"and scalar of type [{typ}]"
388
+ ) from err
389
+
390
+ return result.reshape(x.shape)
391
+
392
+
393
+ def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:
394
+ """
395
+ Evaluate a logical operation `|`, `&`, or `^`.
396
+
397
+ Parameters
398
+ ----------
399
+ left : np.ndarray or ExtensionArray
400
+ right : object
401
+ Cannot be a DataFrame, Series, or Index.
402
+ op : {operator.and_, operator.or_, operator.xor}
403
+ Or one of the reversed variants from roperator.
404
+
405
+ Returns
406
+ -------
407
+ ndarray or ExtensionArray
408
+ """
409
+
410
+ def fill_bool(x, left=None):
411
+ # if `left` is specifically not-boolean, we do not cast to bool
412
+ if x.dtype.kind in "cfO":
413
+ # dtypes that can hold NA
414
+ mask = isna(x)
415
+ if mask.any():
416
+ x = x.astype(object)
417
+ x[mask] = False
418
+
419
+ if left is None or left.dtype.kind == "b":
420
+ x = x.astype(bool)
421
+ return x
422
+
423
+ right = lib.item_from_zerodim(right)
424
+ if is_list_like(right) and not hasattr(right, "dtype"):
425
+ # e.g. list, tuple
426
+ warnings.warn(
427
+ "Logical ops (and, or, xor) between Pandas objects and dtype-less "
428
+ "sequences (e.g. list, tuple) are deprecated and will raise in a "
429
+ "future version. Wrap the object in a Series, Index, or np.array "
430
+ "before operating instead.",
431
+ FutureWarning,
432
+ stacklevel=find_stack_level(),
433
+ )
434
+ right = construct_1d_object_array_from_listlike(right)
435
+
436
+ # NB: We assume extract_array has already been called on left and right
437
+ lvalues = ensure_wrapped_if_datetimelike(left)
438
+ rvalues = right
439
+
440
+ if should_extension_dispatch(lvalues, rvalues):
441
+ # Call the method on lvalues
442
+ res_values = op(lvalues, rvalues)
443
+
444
+ else:
445
+ if isinstance(rvalues, np.ndarray):
446
+ is_other_int_dtype = rvalues.dtype.kind in "iu"
447
+ if not is_other_int_dtype:
448
+ rvalues = fill_bool(rvalues, lvalues)
449
+
450
+ else:
451
+ # i.e. scalar
452
+ is_other_int_dtype = lib.is_integer(rvalues)
453
+
454
+ res_values = na_logical_op(lvalues, rvalues, op)
455
+
456
+ # For int vs int `^`, `|`, `&` are bitwise operators and return
457
+ # integer dtypes. Otherwise these are boolean ops
458
+ if not (left.dtype.kind in "iu" and is_other_int_dtype):
459
+ res_values = fill_bool(res_values)
460
+
461
+ return res_values
462
+
463
+
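A sketch of logical_op coercing a float operand containing NaN through fill_bool before the boolean op runs (internal API):

import operator
import numpy as np
from pandas.core.ops import logical_op

left = np.array([True, False, True])
right = np.array([1.0, np.nan, 0.0])           # NaN is treated as False here
print(logical_op(left, right, operator.and_))  # [ True False False]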
464
+ def get_array_op(op):
465
+ """
466
+ Return a binary array operation corresponding to the given operator op.
467
+
468
+ Parameters
469
+ ----------
470
+ op : function
471
+ Binary operator from operator or roperator module.
472
+
473
+ Returns
474
+ -------
475
+ functools.partial
476
+ """
477
+ if isinstance(op, partial):
478
+ # We get here via dispatch_to_series in DataFrame case
479
+ # e.g. test_rolling_consistency_var_debiasing_factors
480
+ return op
481
+
482
+ op_name = op.__name__.strip("_").lstrip("r")
483
+ if op_name == "arith_op":
484
+ # Reached via DataFrame._combine_frame i.e. flex methods
485
+ # e.g. test_df_add_flex_filled_mixed_dtypes
486
+ return op
487
+
488
+ if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}:
489
+ return partial(comparison_op, op=op)
490
+ elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}:
491
+ return partial(logical_op, op=op)
492
+ elif op_name in {
493
+ "add",
494
+ "sub",
495
+ "mul",
496
+ "truediv",
497
+ "floordiv",
498
+ "mod",
499
+ "divmod",
500
+ "pow",
501
+ }:
502
+ return partial(arithmetic_op, op=op)
503
+ else:
504
+ raise NotImplementedError(op_name)
505
+
506
+
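get_array_op simply routes an operator to one of the three array-level functions above; a quick sketch:

import operator
from pandas.core.ops import get_array_op

print(get_array_op(operator.add).func.__name__)   # arithmetic_op
print(get_array_op(operator.lt).func.__name__)    # comparison_op
print(get_array_op(operator.and_).func.__name__)  # logical_op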
507
+ def maybe_prepare_scalar_for_op(obj, shape: Shape):
508
+ """
509
+ Cast non-pandas objects to pandas types to unify behavior of arithmetic
510
+ and comparison operations.
511
+
512
+ Parameters
513
+ ----------
514
+ obj: object
515
+ shape : tuple[int]
516
+
517
+ Returns
518
+ -------
519
+ out : object
520
+
521
+ Notes
522
+ -----
523
+ Be careful to call this *after* determining the `name` attribute to be
524
+ attached to the result of the arithmetic operation.
525
+ """
526
+ if type(obj) is datetime.timedelta:
527
+ # GH#22390 cast up to Timedelta to rely on Timedelta
528
+ # implementation; otherwise operation against numeric-dtype
529
+ # raises TypeError
530
+ return Timedelta(obj)
531
+ elif type(obj) is datetime.datetime:
532
+ # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above
533
+ return Timestamp(obj)
534
+ elif isinstance(obj, np.datetime64):
535
+ # GH#28080 numpy casts integer-dtype to datetime64 when doing
536
+ # array[int] + datetime64, which we do not allow
537
+ if isna(obj):
538
+ from pandas.core.arrays import DatetimeArray
539
+
540
+ # Avoid possible ambiguities with pd.NaT
541
+ # GH 52295
542
+ if is_unitless(obj.dtype):
543
+ obj = obj.astype("datetime64[ns]")
544
+ elif not is_supported_dtype(obj.dtype):
545
+ new_dtype = get_supported_dtype(obj.dtype)
546
+ obj = obj.astype(new_dtype)
547
+ right = np.broadcast_to(obj, shape)
548
+ return DatetimeArray._simple_new(right, dtype=right.dtype)
549
+
550
+ return Timestamp(obj)
551
+
552
+ elif isinstance(obj, np.timedelta64):
553
+ if isna(obj):
554
+ from pandas.core.arrays import TimedeltaArray
555
+
556
+ # wrapping timedelta64("NaT") in Timedelta returns NaT,
557
+ # which would incorrectly be treated as a datetime-NaT, so
558
+ # we broadcast and wrap in a TimedeltaArray
559
+ # GH 52295
560
+ if is_unitless(obj.dtype):
561
+ obj = obj.astype("timedelta64[ns]")
562
+ elif not is_supported_dtype(obj.dtype):
563
+ new_dtype = get_supported_dtype(obj.dtype)
564
+ obj = obj.astype(new_dtype)
565
+ right = np.broadcast_to(obj, shape)
566
+ return TimedeltaArray._simple_new(right, dtype=right.dtype)
567
+
568
+ # In particular non-nanosecond timedelta64 needs to be cast to
569
+ # nanoseconds, or else we get undesired behavior like
570
+ # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
571
+ return Timedelta(obj)
572
+
573
+ # We want NumPy numeric scalars to behave like Python scalars
574
+ # post NEP 50
575
+ elif isinstance(obj, np.integer):
576
+ return int(obj)
577
+
578
+ elif isinstance(obj, np.floating):
579
+ return float(obj)
580
+
581
+ return obj
582
+
583
+
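A sketch of the scalar upcasting performed by maybe_prepare_scalar_for_op (the shape argument only matters for the NaT-broadcasting branches):

import datetime
import numpy as np
from pandas.core.ops import maybe_prepare_scalar_for_op

print(type(maybe_prepare_scalar_for_op(datetime.timedelta(days=1), (3,))).__name__)  # Timedelta
print(type(maybe_prepare_scalar_for_op(np.int64(5), (3,))).__name__)                 # int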
584
+ _BOOL_OP_NOT_ALLOWED = {
585
+ operator.truediv,
586
+ roperator.rtruediv,
587
+ operator.floordiv,
588
+ roperator.rfloordiv,
589
+ operator.pow,
590
+ roperator.rpow,
591
+ }
592
+
593
+
594
+ def _bool_arith_check(op, a: np.ndarray, b):
595
+ """
596
+ In contrast to numpy, pandas raises an error for certain operations
597
+ with booleans.
598
+ """
599
+ if op in _BOOL_OP_NOT_ALLOWED:
600
+ if a.dtype.kind == "b" and (is_bool_dtype(b) or lib.is_bool(b)):
601
+ op_name = op.__name__.strip("_").lstrip("r")
602
+ raise NotImplementedError(
603
+ f"operator '{op_name}' not implemented for bool dtypes"
604
+ )
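The practical effect of _bool_arith_check, shown through the public Series API (a sketch of the restriction enforced above):

import pandas as pd

try:
    pd.Series([True, False]) // True
except NotImplementedError as err:
    print(err)   # operator 'floordiv' not implemented for bool dtypes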
env-llmeval/lib/python3.10/site-packages/pandas/core/ops/common.py ADDED
@@ -0,0 +1,146 @@
1
+ """
2
+ Boilerplate functions used in defining binary operations.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from functools import wraps
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ Callable,
10
+ )
11
+
12
+ from pandas._libs.lib import item_from_zerodim
13
+ from pandas._libs.missing import is_matching_na
14
+
15
+ from pandas.core.dtypes.generic import (
16
+ ABCIndex,
17
+ ABCSeries,
18
+ )
19
+
20
+ if TYPE_CHECKING:
21
+ from pandas._typing import F
22
+
23
+
24
+ def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]:
25
+ """
26
+ Boilerplate for pandas conventions in arithmetic and comparison methods.
27
+
28
+ Parameters
29
+ ----------
30
+ name : str
31
+
32
+ Returns
33
+ -------
34
+ decorator
35
+ """
36
+
37
+ def wrapper(method: F) -> F:
38
+ return _unpack_zerodim_and_defer(method, name)
39
+
40
+ return wrapper
41
+
42
+
43
+ def _unpack_zerodim_and_defer(method, name: str):
44
+ """
45
+ Boilerplate for pandas conventions in arithmetic and comparison methods.
46
+
47
+ Ensure method returns NotImplemented when operating against "senior"
48
+ classes. Ensure zero-dimensional ndarrays are always unpacked.
49
+
50
+ Parameters
51
+ ----------
52
+ method : binary method
53
+ name : str
54
+
55
+ Returns
56
+ -------
57
+ method
58
+ """
59
+ stripped_name = name.removeprefix("__").removesuffix("__")
60
+ is_cmp = stripped_name in {"eq", "ne", "lt", "le", "gt", "ge"}
61
+
62
+ @wraps(method)
63
+ def new_method(self, other):
64
+ if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries):
65
+ # For comparison ops, Index does *not* defer to Series
66
+ pass
67
+ else:
68
+ prio = getattr(other, "__pandas_priority__", None)
69
+ if prio is not None:
70
+ if prio > self.__pandas_priority__:
71
+ # e.g. other is DataFrame while self is Index/Series/EA
72
+ return NotImplemented
73
+
74
+ other = item_from_zerodim(other)
75
+
76
+ return method(self, other)
77
+
78
+ return new_method
79
+
80
+
81
+ def get_op_result_name(left, right):
82
+ """
83
+ Find the appropriate name to pin to an operation result. This result
84
+ should always be either an Index or a Series.
85
+
86
+ Parameters
87
+ ----------
88
+ left : {Series, Index}
89
+ right : object
90
+
91
+ Returns
92
+ -------
93
+ name : object
94
+ Usually a string
95
+ """
96
+ if isinstance(right, (ABCSeries, ABCIndex)):
97
+ name = _maybe_match_name(left, right)
98
+ else:
99
+ name = left.name
100
+ return name
101
+
102
+
103
+ def _maybe_match_name(a, b):
104
+ """
105
+ Try to find a name to attach to the result of an operation between
106
+ a and b. If only one of these has a `name` attribute, return that
107
+ name. Otherwise return a consensus name if they match or None if
108
+ they have different names.
109
+
110
+ Parameters
111
+ ----------
112
+ a : object
113
+ b : object
114
+
115
+ Returns
116
+ -------
117
+ name : str or None
118
+
119
+ See Also
120
+ --------
121
+ pandas.core.common.consensus_name_attr
122
+ """
123
+ a_has = hasattr(a, "name")
124
+ b_has = hasattr(b, "name")
125
+ if a_has and b_has:
126
+ try:
127
+ if a.name == b.name:
128
+ return a.name
129
+ elif is_matching_na(a.name, b.name):
130
+ # e.g. both are np.nan
131
+ return a.name
132
+ else:
133
+ return None
134
+ except TypeError:
135
+ # pd.NA
136
+ if is_matching_na(a.name, b.name):
137
+ return a.name
138
+ return None
139
+ except ValueError:
140
+ # e.g. np.int64(1) vs (np.int64(1), np.int64(2))
141
+ return None
142
+ elif a_has:
143
+ return a.name
144
+ elif b_has:
145
+ return b.name
146
+ return None
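Finally, a sketch of the name-propagation rule implemented by get_op_result_name and _maybe_match_name:

import pandas as pd
from pandas.core.ops import get_op_result_name

a = pd.Series([1, 2], name="x")
b = pd.Series([3, 4], name="x")
c = pd.Series([5, 6], name="y")
print(get_op_result_name(a, b))  # x
print(get_op_result_name(a, c))  # None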