Commit f89dba6 (verified) committed by applied-ai-018 · 1 parent: d020ce8

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__init__.py +20 -0
  2. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/column.py +529 -0
  8. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py +217 -0
  9. llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py +614 -0
  10. llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__init__.py +20 -0
  11. llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/core.py +2341 -0
  15. llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/encryption.py +23 -0
  16. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/CMakeLists.txt +18 -0
  17. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/api.h +30 -0
  18. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.cc +2645 -0
  19. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.h +146 -0
  20. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_python_internal.h +49 -0
  21. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/async.h +60 -0
  22. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.cc +38 -0
  23. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.h +36 -0
  24. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.cc +246 -0
  25. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.cc +62 -0
  26. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.h +42 -0
  27. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.cc +663 -0
  28. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.h +231 -0
  29. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.cc +246 -0
  30. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.h +128 -0
  31. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.cc +495 -0
  32. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.h +106 -0
  33. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.cc +217 -0
  34. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.h +85 -0
  35. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.cc +206 -0
  36. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.h +130 -0
  37. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.cc +388 -0
  38. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.h +350 -0
  39. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.cc +530 -0
  40. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.h +29 -0
  41. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/helpers.cc +472 -0
  42. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.cc +745 -0
  43. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.h +64 -0
  44. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.cc +24 -0
  45. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.h +26 -0
  46. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.cc +387 -0
  47. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.h +121 -0
  48. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/ipc.cc +133 -0
  49. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/ipc.h +72 -0
  50. llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/iterators.h +194 -0
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__init__.py ADDED
@@ -0,0 +1,20 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # flake8: noqa
+
+ from .from_dataframe import from_dataframe
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (227 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/buffer.cpython-310.pyc ADDED
Binary file (3.34 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/column.cpython-310.pyc ADDED
Binary file (16.7 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc ADDED
Binary file (7.45 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc ADDED
Binary file (13.1 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/column.py ADDED
@@ -0,0 +1,529 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import annotations
19
+
20
+ import enum
21
+ from typing import (
22
+ Any,
23
+ Dict,
24
+ Iterable,
25
+ Optional,
26
+ Tuple,
27
+ )
28
+
29
+ import sys
30
+ if sys.version_info >= (3, 8):
31
+ from typing import TypedDict
32
+ else:
33
+ from typing_extensions import TypedDict
34
+
35
+ import pyarrow as pa
36
+ import pyarrow.compute as pc
37
+ from pyarrow.interchange.buffer import _PyArrowBuffer
38
+
39
+
40
+ class DtypeKind(enum.IntEnum):
41
+ """
42
+ Integer enum for data types.
43
+
44
+ Attributes
45
+ ----------
46
+ INT : int
47
+ Matches to signed integer data type.
48
+ UINT : int
49
+ Matches to unsigned integer data type.
50
+ FLOAT : int
51
+ Matches to floating point data type.
52
+ BOOL : int
53
+ Matches to boolean data type.
54
+ STRING : int
55
+ Matches to string data type (UTF-8 encoded).
56
+ DATETIME : int
57
+ Matches to datetime data type.
58
+ CATEGORICAL : int
59
+ Matches to categorical data type.
60
+ """
61
+
62
+ INT = 0
63
+ UINT = 1
64
+ FLOAT = 2
65
+ BOOL = 20
66
+ STRING = 21 # UTF-8
67
+ DATETIME = 22
68
+ CATEGORICAL = 23
69
+
70
+
71
+ Dtype = Tuple[DtypeKind, int, str, str] # see Column.dtype
72
+
73
+
74
+ _PYARROW_KINDS = {
75
+ pa.int8(): (DtypeKind.INT, "c"),
76
+ pa.int16(): (DtypeKind.INT, "s"),
77
+ pa.int32(): (DtypeKind.INT, "i"),
78
+ pa.int64(): (DtypeKind.INT, "l"),
79
+ pa.uint8(): (DtypeKind.UINT, "C"),
80
+ pa.uint16(): (DtypeKind.UINT, "S"),
81
+ pa.uint32(): (DtypeKind.UINT, "I"),
82
+ pa.uint64(): (DtypeKind.UINT, "L"),
83
+ pa.float16(): (DtypeKind.FLOAT, "e"),
84
+ pa.float32(): (DtypeKind.FLOAT, "f"),
85
+ pa.float64(): (DtypeKind.FLOAT, "g"),
86
+ pa.bool_(): (DtypeKind.BOOL, "b"),
87
+ pa.string(): (DtypeKind.STRING, "u"),
88
+ pa.large_string(): (DtypeKind.STRING, "U"),
89
+ }
90
+
91
+
92
+ class ColumnNullType(enum.IntEnum):
93
+ """
94
+ Integer enum for null type representation.
95
+
96
+ Attributes
97
+ ----------
98
+ NON_NULLABLE : int
99
+ Non-nullable column.
100
+ USE_NAN : int
101
+ Use explicit float NaN value.
102
+ USE_SENTINEL : int
103
+ Sentinel value besides NaN.
104
+ USE_BITMASK : int
105
+ The bit is set/unset representing a null on a certain position.
106
+ USE_BYTEMASK : int
107
+ The byte is set/unset representing a null on a certain position.
108
+ """
109
+
110
+ NON_NULLABLE = 0
111
+ USE_NAN = 1
112
+ USE_SENTINEL = 2
113
+ USE_BITMASK = 3
114
+ USE_BYTEMASK = 4
115
+
116
+
117
+ class ColumnBuffers(TypedDict):
118
+ # first element is a buffer containing the column data;
119
+ # second element is the data buffer's associated dtype
120
+ data: Tuple[_PyArrowBuffer, Dtype]
121
+
122
+ # first element is a buffer containing mask values indicating missing data;
123
+ # second element is the mask value buffer's associated dtype.
124
+ # None if the null representation is not a bit or byte mask
125
+ validity: Optional[Tuple[_PyArrowBuffer, Dtype]]
126
+
127
+ # first element is a buffer containing the offset values for
128
+ # variable-size binary data (e.g., variable-length strings);
129
+ # second element is the offsets buffer's associated dtype.
130
+ # None if the data buffer does not have an associated offsets buffer
131
+ offsets: Optional[Tuple[_PyArrowBuffer, Dtype]]
132
+
133
+
134
+ class CategoricalDescription(TypedDict):
135
+ # whether the ordering of dictionary indices is semantically meaningful
136
+ is_ordered: bool
137
+ # whether a dictionary-style mapping of categorical values to other objects
138
+ # exists
139
+ is_dictionary: bool
140
+ # Python-level only (e.g. ``{int: str}``).
141
+ # None if not a dictionary-style categorical.
142
+ categories: Optional[_PyArrowColumn]
143
+
144
+
145
+ class Endianness:
146
+ """Enum indicating the byte-order of a data-type."""
147
+
148
+ LITTLE = "<"
149
+ BIG = ">"
150
+ NATIVE = "="
151
+ NA = "|"
152
+
153
+
154
+ class NoBufferPresent(Exception):
155
+ """Exception to signal that there is no requested buffer."""
156
+
157
+
158
+ class _PyArrowColumn:
159
+ """
160
+ A column object, with only the methods and properties required by the
161
+ interchange protocol defined.
162
+
163
+ A column can contain one or more chunks. Each chunk can contain up to three
164
+ buffers - a data buffer, a mask buffer (depending on null representation),
165
+ and an offsets buffer (if variable-size binary; e.g., variable-length
166
+ strings).
167
+
168
+ TBD: Arrow has a separate "null" dtype, and has no separate mask concept.
169
+ Instead, it seems to use "children" for both columns with a bit mask,
170
+ and for nested dtypes. Unclear whether this is elegant or confusing.
171
+ This design requires checking the null representation explicitly.
172
+
173
+ The Arrow design requires checking:
174
+ 1. the ARROW_FLAG_NULLABLE (for sentinel values)
175
+ 2. if a column has two children, combined with one of those children
176
+ having a null dtype.
177
+
178
+ Making the mask concept explicit seems useful. One null dtype would
179
+ not be enough to cover both bit and byte masks, so that would mean
180
+ even more checking if we did it the Arrow way.
181
+
182
+ TBD: there's also the "chunk" concept here, which is implicit in Arrow as
183
+ multiple buffers per array (= column here). Semantically it may make
184
+ sense to have both: chunks were meant for example for lazy evaluation
185
+ of data which doesn't fit in memory, while multiple buffers per column
186
+ could also come from doing a selection operation on a single
187
+ contiguous buffer.
188
+
189
+ Given these concepts, one would expect chunks to be all of the same
190
+ size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows),
191
+ while multiple buffers could have data-dependent lengths. Not an issue
192
+ in pandas if one column is backed by a single NumPy array, but in
193
+ Arrow it seems possible.
194
+ Are multiple chunks *and* multiple buffers per column necessary for
195
+ the purposes of this interchange protocol, or must producers either
196
+ reuse the chunk concept for this or copy the data?
197
+
198
+ Note: this Column object can only be produced by ``__dataframe__``, so
199
+ doesn't need its own version or ``__column__`` protocol.
200
+ """
201
+
202
+ def __init__(
203
+ self, column: pa.Array | pa.ChunkedArray, allow_copy: bool = True
204
+ ) -> None:
205
+ """
206
+ Handles PyArrow Arrays and ChunkedArrays.
207
+ """
208
+ # Store the column as a private attribute
209
+ if isinstance(column, pa.ChunkedArray):
210
+ if column.num_chunks == 1:
211
+ column = column.chunk(0)
212
+ else:
213
+ if not allow_copy:
214
+ raise RuntimeError(
215
+ "Chunks will be combined and a copy is required which "
216
+ "is forbidden by allow_copy=False"
217
+ )
218
+ column = column.combine_chunks()
219
+
220
+ self._allow_copy = allow_copy
221
+
222
+ if pa.types.is_boolean(column.type):
223
+ if not allow_copy:
224
+ raise RuntimeError(
225
+ "Boolean column will be casted to uint8 and a copy "
226
+ "is required which is forbidden by allow_copy=False"
227
+ )
228
+ self._dtype = self._dtype_from_arrowdtype(column.type, 8)
229
+ self._col = pc.cast(column, pa.uint8())
230
+ else:
231
+ self._col = column
232
+ dtype = self._col.type
233
+ try:
234
+ bit_width = dtype.bit_width
235
+ except ValueError:
236
+ # in case of a variable-length strings, considered as array
237
+ # of bytes (8 bits)
238
+ bit_width = 8
239
+ self._dtype = self._dtype_from_arrowdtype(dtype, bit_width)
240
+
241
+ def size(self) -> int:
242
+ """
243
+ Size of the column, in elements.
244
+
245
+ Corresponds to DataFrame.num_rows() if column is a single chunk;
246
+ equal to size of this current chunk otherwise.
247
+
248
+ Is a method rather than a property because it may cause a (potentially
249
+ expensive) computation for some dataframe implementations.
250
+ """
251
+ return len(self._col)
252
+
253
+ @property
254
+ def offset(self) -> int:
255
+ """
256
+ Offset of first element.
257
+
258
+ May be > 0 if using chunks; for example for a column with N chunks of
259
+ equal size M (only the last chunk may be shorter),
260
+ ``offset = n * M``, ``n = 0 .. N-1``.
261
+ """
262
+ return self._col.offset
263
+
264
+ @property
265
+ def dtype(self) -> Tuple[DtypeKind, int, str, str]:
266
+ """
267
+ Dtype description as a tuple ``(kind, bit-width, format string,
268
+ endianness)``.
269
+
270
+ Bit-width : the number of bits as an integer
271
+ Format string : data type description format string in Apache Arrow C
272
+ Data Interface format.
273
+ Endianness : current only native endianness (``=``) is supported
274
+
275
+ Notes:
276
+ - Kind specifiers are aligned with DLPack where possible (hence the
277
+ jump to 20, leave enough room for future extension)
278
+ - Masks must be specified as boolean with either bit width 1 (for
279
+ bit masks) or 8 (for byte masks).
280
+ - Dtype width in bits was preferred over bytes
281
+ - Endianness isn't too useful, but included now in case in the
282
+ future we need to support non-native endianness
283
+ - Went with Apache Arrow format strings over NumPy format strings
284
+ because they're more complete from a dataframe perspective
285
+ - Format strings are mostly useful for datetime specification, and
286
+ for categoricals.
287
+ - For categoricals, the format string describes the type of the
288
+ categorical in the data buffer. In case of a separate encoding of
289
+ the categorical (e.g. an integer to string mapping), this can
290
+ be derived from ``self.describe_categorical``.
291
+ - Data types not included: complex, Arrow-style null, binary,
292
+ decimal, and nested (list, struct, map, union) dtypes.
293
+ """
294
+ return self._dtype
295
+
296
+ def _dtype_from_arrowdtype(
297
+ self, dtype: pa.DataType, bit_width: int
298
+ ) -> Tuple[DtypeKind, int, str, str]:
299
+ """
300
+ See `self.dtype` for details.
301
+ """
302
+ # Note: 'c' (complex) not handled yet (not in array spec v1).
303
+ # 'b', 'B' (bytes), 'S', 'a', (old-style string) 'V' (void)
304
+ # not handled datetime and timedelta both map to datetime
305
+ # (is timedelta handled?)
306
+
307
+ if pa.types.is_timestamp(dtype):
308
+ kind = DtypeKind.DATETIME
309
+ ts = dtype.unit[0]
310
+ tz = dtype.tz if dtype.tz else ""
311
+ f_string = "ts{ts}:{tz}".format(ts=ts, tz=tz)
312
+ return kind, bit_width, f_string, Endianness.NATIVE
313
+ elif pa.types.is_dictionary(dtype):
314
+ kind = DtypeKind.CATEGORICAL
315
+ arr = self._col
316
+ indices_dtype = arr.indices.type
317
+ _, f_string = _PYARROW_KINDS.get(indices_dtype)
318
+ return kind, bit_width, f_string, Endianness.NATIVE
319
+ else:
320
+ kind, f_string = _PYARROW_KINDS.get(dtype, (None, None))
321
+ if kind is None:
322
+ raise ValueError(
323
+ f"Data type {dtype} not supported by interchange protocol")
324
+
325
+ return kind, bit_width, f_string, Endianness.NATIVE
326
+
327
+ @property
328
+ def describe_categorical(self) -> CategoricalDescription:
329
+ """
330
+ If the dtype is categorical, there are two options:
331
+ - There are only values in the data buffer.
332
+ - There is a separate non-categorical Column encoding categorical
333
+ values.
334
+
335
+ Raises TypeError if the dtype is not categorical
336
+
337
+ Returns the dictionary with description on how to interpret the
338
+ data buffer:
339
+ - "is_ordered" : bool, whether the ordering of dictionary indices
340
+ is semantically meaningful.
341
+ - "is_dictionary" : bool, whether a mapping of
342
+ categorical values to other objects exists
343
+ - "categories" : Column representing the (implicit) mapping of
344
+ indices to category values (e.g. an array of
345
+ cat1, cat2, ...). None if not a dictionary-style
346
+ categorical.
347
+
348
+ TBD: are there any other in-memory representations that are needed?
349
+ """
350
+ arr = self._col
351
+ if not pa.types.is_dictionary(arr.type):
352
+ raise TypeError(
353
+ "describe_categorical only works on a column with "
354
+ "categorical dtype!"
355
+ )
356
+
357
+ return {
358
+ "is_ordered": self._col.type.ordered,
359
+ "is_dictionary": True,
360
+ "categories": _PyArrowColumn(arr.dictionary),
361
+ }
362
+
363
+ @property
364
+ def describe_null(self) -> Tuple[ColumnNullType, Any]:
365
+ """
366
+ Return the missing value (or "null") representation the column dtype
367
+ uses, as a tuple ``(kind, value)``.
368
+
369
+ Value : if kind is "sentinel value", the actual value. If kind is a bit
370
+ mask or a byte mask, the value (0 or 1) indicating a missing value.
371
+ None otherwise.
372
+ """
373
+ # In case of no missing values, we need to set ColumnNullType to
374
+ # non nullable as in the current __dataframe__ protocol bit/byte masks
375
+ # cannot be None
376
+ if self.null_count == 0:
377
+ return ColumnNullType.NON_NULLABLE, None
378
+ else:
379
+ return ColumnNullType.USE_BITMASK, 0
380
+
381
+ @property
382
+ def null_count(self) -> int:
383
+ """
384
+ Number of null elements, if known.
385
+
386
+ Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
387
+ """
388
+ arrow_null_count = self._col.null_count
389
+ n = arrow_null_count if arrow_null_count != -1 else None
390
+ return n
391
+
392
+ @property
393
+ def metadata(self) -> Dict[str, Any]:
394
+ """
395
+ The metadata for the column. See `DataFrame.metadata` for more details.
396
+ """
397
+ pass
398
+
399
+ def num_chunks(self) -> int:
400
+ """
401
+ Return the number of chunks the column consists of.
402
+ """
403
+ return 1
404
+
405
+ def get_chunks(
406
+ self, n_chunks: Optional[int] = None
407
+ ) -> Iterable[_PyArrowColumn]:
408
+ """
409
+ Return an iterator yielding the chunks.
410
+
411
+ See `DataFrame.get_chunks` for details on ``n_chunks``.
412
+ """
413
+ if n_chunks and n_chunks > 1:
414
+ chunk_size = self.size() // n_chunks
415
+ if self.size() % n_chunks != 0:
416
+ chunk_size += 1
417
+
418
+ array = self._col
419
+ i = 0
420
+ for start in range(0, chunk_size * n_chunks, chunk_size):
421
+ yield _PyArrowColumn(
422
+ array.slice(start, chunk_size), self._allow_copy
423
+ )
424
+ i += 1
425
+ else:
426
+ yield self
427
+
428
+ def get_buffers(self) -> ColumnBuffers:
429
+ """
430
+ Return a dictionary containing the underlying buffers.
431
+
432
+ The returned dictionary has the following contents:
433
+
434
+ - "data": a two-element tuple whose first element is a buffer
435
+ containing the data and whose second element is the data
436
+ buffer's associated dtype.
437
+ - "validity": a two-element tuple whose first element is a buffer
438
+ containing mask values indicating missing data and
439
+ whose second element is the mask value buffer's
440
+ associated dtype. None if the null representation is
441
+ not a bit or byte mask.
442
+ - "offsets": a two-element tuple whose first element is a buffer
443
+ containing the offset values for variable-size binary
444
+ data (e.g., variable-length strings) and whose second
445
+ element is the offsets buffer's associated dtype. None
446
+ if the data buffer does not have an associated offsets
447
+ buffer.
448
+ """
449
+ buffers: ColumnBuffers = {
450
+ "data": self._get_data_buffer(),
451
+ "validity": None,
452
+ "offsets": None,
453
+ }
454
+
455
+ try:
456
+ buffers["validity"] = self._get_validity_buffer()
457
+ except NoBufferPresent:
458
+ pass
459
+
460
+ try:
461
+ buffers["offsets"] = self._get_offsets_buffer()
462
+ except NoBufferPresent:
463
+ pass
464
+
465
+ return buffers
466
+
467
+ def _get_data_buffer(
468
+ self,
469
+ ) -> Tuple[_PyArrowBuffer, Any]: # Any is for self.dtype tuple
470
+ """
471
+ Return the buffer containing the data and the buffer's
472
+ associated dtype.
473
+ """
474
+ array = self._col
475
+ dtype = self.dtype
476
+
477
+ # In case of dictionary arrays, use indices
478
+ # to define a buffer, codes are transferred through
479
+ # describe_categorical()
480
+ if pa.types.is_dictionary(array.type):
481
+ array = array.indices
482
+ dtype = _PyArrowColumn(array).dtype
483
+
484
+ n = len(array.buffers())
485
+ if n == 2:
486
+ return _PyArrowBuffer(array.buffers()[1]), dtype
487
+ elif n == 3:
488
+ return _PyArrowBuffer(array.buffers()[2]), dtype
489
+
490
+ def _get_validity_buffer(self) -> Tuple[_PyArrowBuffer, Any]:
491
+ """
492
+ Return the buffer containing the mask values indicating missing data
493
+ and the buffer's associated dtype.
494
+ Raises NoBufferPresent if null representation is not a bit or byte
495
+ mask.
496
+ """
497
+ # Define the dtype of the returned buffer
498
+ dtype = (DtypeKind.BOOL, 1, "b", Endianness.NATIVE)
499
+ array = self._col
500
+ buff = array.buffers()[0]
501
+ if buff:
502
+ return _PyArrowBuffer(buff), dtype
503
+ else:
504
+ raise NoBufferPresent(
505
+ "There are no missing values so "
506
+ "does not have a separate mask")
507
+
508
+ def _get_offsets_buffer(self) -> Tuple[_PyArrowBuffer, Any]:
509
+ """
510
+ Return the buffer containing the offset values for variable-size binary
511
+ data (e.g., variable-length strings) and the buffer's associated dtype.
512
+ Raises NoBufferPresent if the data buffer does not have an associated
513
+ offsets buffer.
514
+ """
515
+ array = self._col
516
+ n = len(array.buffers())
517
+ if n == 2:
518
+ raise NoBufferPresent(
519
+ "This column has a fixed-length dtype so "
520
+ "it does not have an offsets buffer"
521
+ )
522
+ elif n == 3:
523
+ # Define the dtype of the returned buffer
524
+ dtype = self._col.type
525
+ if pa.types.is_large_string(dtype):
526
+ dtype = (DtypeKind.INT, 64, "l", Endianness.NATIVE)
527
+ else:
528
+ dtype = (DtypeKind.INT, 32, "i", Endianness.NATIVE)
529
+ return _PyArrowBuffer(array.buffers()[1]), dtype
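A minimal usage sketch of the column object defined above, reached through the public protocol entry point (assumes a pyarrow build that ships this interchange module; the table contents and printed values are illustrative only):

import pyarrow as pa

# Build a small table and ask for its interchange-protocol view.
table = pa.table({"v": pa.array([1, 2, None], type=pa.int64())})
col = table.__dataframe__().get_column(0)   # a _PyArrowColumn as defined above

print(col.size())           # 3
print(col.dtype)            # e.g. (DtypeKind.INT, 64, 'l', '=') for int64
print(col.describe_null)    # (ColumnNullType.USE_BITMASK, 0) -- one null is present
print(col.get_buffers())    # dict with 'data', 'validity' and 'offsets' entries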
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py ADDED
@@ -0,0 +1,217 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import annotations
19
+ from typing import (
20
+ Any,
21
+ Iterable,
22
+ Optional,
23
+ Sequence,
24
+ )
25
+
26
+ import pyarrow as pa
27
+
28
+ from pyarrow.interchange.column import _PyArrowColumn
29
+
30
+
31
+ class _PyArrowDataFrame:
32
+ """
33
+ A data frame class, with only the methods required by the interchange
34
+ protocol defined.
35
+
36
+ A "data frame" represents an ordered collection of named columns.
37
+ A column's "name" must be a unique string.
38
+ Columns may be accessed by name or by position.
39
+
40
+ This could be a public data frame class, or an object with the methods and
41
+ attributes defined on this DataFrame class could be returned from the
42
+ ``__dataframe__`` method of a public data frame class in a library adhering
43
+ to the dataframe interchange protocol specification.
44
+ """
45
+
46
+ def __init__(
47
+ self, df: pa.Table | pa.RecordBatch,
48
+ nan_as_null: bool = False,
49
+ allow_copy: bool = True
50
+ ) -> None:
51
+ """
52
+ Constructor - an instance of this (private) class is returned from
53
+ `pa.Table.__dataframe__` or `pa.RecordBatch.__dataframe__`.
54
+ """
55
+ self._df = df
56
+ # ``nan_as_null`` is a keyword intended for the consumer to tell the
57
+ # producer to overwrite null values in the data with ``NaN`` (or
58
+ # ``NaT``).
59
+ if nan_as_null is True:
60
+ raise RuntimeError(
61
+ "nan_as_null=True currently has no effect, "
62
+ "use the default nan_as_null=False"
63
+ )
64
+ self._nan_as_null = nan_as_null
65
+ self._allow_copy = allow_copy
66
+
67
+ def __dataframe__(
68
+ self, nan_as_null: bool = False, allow_copy: bool = True
69
+ ) -> _PyArrowDataFrame:
70
+ """
71
+ Construct a new exchange object, potentially changing the parameters.
72
+ ``nan_as_null`` is a keyword intended for the consumer to tell the
73
+ producer to overwrite null values in the data with ``NaN``.
74
+ It is intended for cases where the consumer does not support the bit
75
+ mask or byte mask that is the producer's native representation.
76
+ ``allow_copy`` is a keyword that defines whether or not the library is
77
+ allowed to make a copy of the data. For example, copying data would be
78
+ necessary if a library supports strided buffers, given that this
79
+ protocol specifies contiguous buffers.
80
+ """
81
+ return _PyArrowDataFrame(self._df, nan_as_null, allow_copy)
82
+
83
+ @property
84
+ def metadata(self) -> dict[str, Any]:
85
+ """
86
+ The metadata for the data frame, as a dictionary with string keys. The
87
+ contents of `metadata` may be anything, they are meant for a library
88
+ to store information that it needs to, e.g., roundtrip losslessly or
89
+ for two implementations to share data that is not (yet) part of the
90
+ interchange protocol specification. For avoiding collisions with other
91
+ entries, please add name the keys with the name of the library
92
+ followed by a period and the desired name, e.g, ``pandas.indexcol``.
93
+ """
94
+ # The metadata for the data frame, as a dictionary with string keys.
95
+ # Add schema metadata here (pandas metadata or custom metadata)
96
+ if self._df.schema.metadata:
97
+ schema_metadata = {"pyarrow." + k.decode('utf8'): v.decode('utf8')
98
+ for k, v in self._df.schema.metadata.items()}
99
+ return schema_metadata
100
+ else:
101
+ return {}
102
+
103
+ def num_columns(self) -> int:
104
+ """
105
+ Return the number of columns in the DataFrame.
106
+ """
107
+ return self._df.num_columns
108
+
109
+ def num_rows(self) -> int:
110
+ """
111
+ Return the number of rows in the DataFrame, if available.
112
+ """
113
+ return self._df.num_rows
114
+
115
+ def num_chunks(self) -> int:
116
+ """
117
+ Return the number of chunks the DataFrame consists of.
118
+ """
119
+ if isinstance(self._df, pa.RecordBatch):
120
+ return 1
121
+ else:
122
+ # pyarrow.Table can have columns with different number
123
+ # of chunks so we take the number of chunks that
124
+ # .to_batches() returns as it takes the min chunk size
125
+ # of all the columns (to_batches is a zero copy method)
126
+ batches = self._df.to_batches()
127
+ return len(batches)
128
+
129
+ def column_names(self) -> Iterable[str]:
130
+ """
131
+ Return an iterator yielding the column names.
132
+ """
133
+ return self._df.schema.names
134
+
135
+ def get_column(self, i: int) -> _PyArrowColumn:
136
+ """
137
+ Return the column at the indicated position.
138
+ """
139
+ return _PyArrowColumn(self._df.column(i),
140
+ allow_copy=self._allow_copy)
141
+
142
+ def get_column_by_name(self, name: str) -> _PyArrowColumn:
143
+ """
144
+ Return the column whose name is the indicated name.
145
+ """
146
+ return _PyArrowColumn(self._df.column(name),
147
+ allow_copy=self._allow_copy)
148
+
149
+ def get_columns(self) -> Iterable[_PyArrowColumn]:
150
+ """
151
+ Return an iterator yielding the columns.
152
+ """
153
+ return [
154
+ _PyArrowColumn(col, allow_copy=self._allow_copy)
155
+ for col in self._df.columns
156
+ ]
157
+
158
+ def select_columns(self, indices: Sequence[int]) -> _PyArrowDataFrame:
159
+ """
160
+ Create a new DataFrame by selecting a subset of columns by index.
161
+ """
162
+ return _PyArrowDataFrame(
163
+ self._df.select(list(indices)), self._nan_as_null, self._allow_copy
164
+ )
165
+
166
+ def select_columns_by_name(
167
+ self, names: Sequence[str]
168
+ ) -> _PyArrowDataFrame:
169
+ """
170
+ Create a new DataFrame by selecting a subset of columns by name.
171
+ """
172
+ return _PyArrowDataFrame(
173
+ self._df.select(list(names)), self._nan_as_null, self._allow_copy
174
+ )
175
+
176
+ def get_chunks(
177
+ self, n_chunks: Optional[int] = None
178
+ ) -> Iterable[_PyArrowDataFrame]:
179
+ """
180
+ Return an iterator yielding the chunks.
181
+
182
+ By default (None), yields the chunks that the data is stored as by the
183
+ producer. If given, ``n_chunks`` must be a multiple of
184
+ ``self.num_chunks()``, meaning the producer must subdivide each chunk
185
+ before yielding it.
186
+
187
+ Note that the producer must ensure that all columns are chunked the
188
+ same way.
189
+ """
190
+ # Subdivide chunks
191
+ if n_chunks and n_chunks > 1:
192
+ chunk_size = self.num_rows() // n_chunks
193
+ if self.num_rows() % n_chunks != 0:
194
+ chunk_size += 1
195
+ if isinstance(self._df, pa.Table):
196
+ batches = self._df.to_batches(max_chunksize=chunk_size)
197
+ else:
198
+ batches = []
199
+ for start in range(0, chunk_size * n_chunks, chunk_size):
200
+ batches.append(self._df.slice(start, chunk_size))
201
+ # In case when the size of the chunk is such that the resulting
202
+ # list is one less chunk then n_chunks -> append an empty chunk
203
+ if len(batches) == n_chunks - 1:
204
+ batches.append(pa.record_batch([[]], schema=self._df.schema))
205
+ # yields the chunks that the data is stored as
206
+ else:
207
+ if isinstance(self._df, pa.Table):
208
+ batches = self._df.to_batches()
209
+ else:
210
+ batches = [self._df]
211
+
212
+ # Create an iterator of RecordBatches
213
+ iterator = [_PyArrowDataFrame(batch,
214
+ self._nan_as_null,
215
+ self._allow_copy)
216
+ for batch in batches]
217
+ return iterator
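A short sketch of the chunk subdivision implemented in get_chunks above; the row count and n_chunks value are made up for illustration and assume the same pyarrow build:

import pyarrow as pa

table = pa.table({"v": list(range(10))})
xdf = table.__dataframe__()

print(xdf.num_chunks())                    # 1 -- a freshly built table is one chunk
chunks = list(xdf.get_chunks(n_chunks=2))  # the producer subdivides the data itself
print([c.num_rows() for c in chunks])      # [5, 5]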
llmeval-env/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py ADDED
@@ -0,0 +1,614 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import (
21
+ Any,
22
+ Tuple,
23
+ )
24
+
25
+ from pyarrow.interchange.column import (
26
+ DtypeKind,
27
+ ColumnBuffers,
28
+ ColumnNullType,
29
+ )
30
+
31
+ import pyarrow as pa
32
+ import re
33
+
34
+ import pyarrow.compute as pc
35
+ from pyarrow.interchange.column import Dtype
36
+
37
+
38
+ # A typing protocol could be added later to let Mypy validate code using
39
+ # `from_dataframe` better.
40
+ DataFrameObject = Any
41
+ ColumnObject = Any
42
+ BufferObject = Any
43
+
44
+
45
+ _PYARROW_DTYPES: dict[DtypeKind, dict[int, Any]] = {
46
+ DtypeKind.INT: {8: pa.int8(),
47
+ 16: pa.int16(),
48
+ 32: pa.int32(),
49
+ 64: pa.int64()},
50
+ DtypeKind.UINT: {8: pa.uint8(),
51
+ 16: pa.uint16(),
52
+ 32: pa.uint32(),
53
+ 64: pa.uint64()},
54
+ DtypeKind.FLOAT: {16: pa.float16(),
55
+ 32: pa.float32(),
56
+ 64: pa.float64()},
57
+ DtypeKind.BOOL: {1: pa.bool_(),
58
+ 8: pa.uint8()},
59
+ DtypeKind.STRING: {8: pa.string()},
60
+ }
61
+
62
+
63
+ def from_dataframe(df: DataFrameObject, allow_copy=True) -> pa.Table:
64
+ """
65
+ Build a ``pa.Table`` from any DataFrame supporting the interchange protocol.
66
+
67
+ Parameters
68
+ ----------
69
+ df : DataFrameObject
70
+ Object supporting the interchange protocol, i.e. `__dataframe__`
71
+ method.
72
+ allow_copy : bool, default: True
73
+ Whether to allow copying the memory to perform the conversion
74
+ (if false then zero-copy approach is requested).
75
+
76
+ Returns
77
+ -------
78
+ pa.Table
79
+
80
+ Examples
81
+ --------
82
+ >>> import pyarrow
83
+ >>> from pyarrow.interchange import from_dataframe
84
+
85
+ Convert a pandas dataframe to a pyarrow table:
86
+
87
+ >>> import pandas as pd
88
+ >>> df = pd.DataFrame({
89
+ ... "n_attendees": [100, 10, 1],
90
+ ... "country": ["Italy", "Spain", "Slovenia"],
91
+ ... })
92
+ >>> df
93
+ n_attendees country
94
+ 0 100 Italy
95
+ 1 10 Spain
96
+ 2 1 Slovenia
97
+ >>> from_dataframe(df)
98
+ pyarrow.Table
99
+ n_attendees: int64
100
+ country: large_string
101
+ ----
102
+ n_attendees: [[100,10,1]]
103
+ country: [["Italy","Spain","Slovenia"]]
104
+ """
105
+ if isinstance(df, pa.Table):
106
+ return df
107
+ elif isinstance(df, pa.RecordBatch):
108
+ return pa.Table.from_batches([df])
109
+
110
+ if not hasattr(df, "__dataframe__"):
111
+ raise ValueError("`df` does not support __dataframe__")
112
+
113
+ return _from_dataframe(df.__dataframe__(allow_copy=allow_copy),
114
+ allow_copy=allow_copy)
115
+
116
+
117
+ def _from_dataframe(df: DataFrameObject, allow_copy=True):
118
+ """
119
+ Build a ``pa.Table`` from the DataFrame interchange object.
120
+
121
+ Parameters
122
+ ----------
123
+ df : DataFrameObject
124
+ Object supporting the interchange protocol, i.e. `__dataframe__`
125
+ method.
126
+ allow_copy : bool, default: True
127
+ Whether to allow copying the memory to perform the conversion
128
+ (if false then zero-copy approach is requested).
129
+
130
+ Returns
131
+ -------
132
+ pa.Table
133
+ """
134
+ batches = []
135
+ for chunk in df.get_chunks():
136
+ batch = protocol_df_chunk_to_pyarrow(chunk, allow_copy)
137
+ batches.append(batch)
138
+
139
+ if not batches:
140
+ batch = protocol_df_chunk_to_pyarrow(df)
141
+ batches.append(batch)
142
+
143
+ return pa.Table.from_batches(batches)
144
+
145
+
146
+ def protocol_df_chunk_to_pyarrow(
147
+ df: DataFrameObject,
148
+ allow_copy: bool = True
149
+ ) -> pa.RecordBatch:
150
+ """
151
+ Convert interchange protocol chunk to ``pa.RecordBatch``.
152
+
153
+ Parameters
154
+ ----------
155
+ df : DataFrameObject
156
+ Object supporting the interchange protocol, i.e. `__dataframe__`
157
+ method.
158
+ allow_copy : bool, default: True
159
+ Whether to allow copying the memory to perform the conversion
160
+ (if false then zero-copy approach is requested).
161
+
162
+ Returns
163
+ -------
164
+ pa.RecordBatch
165
+ """
166
+ # We need a dict of columns here, with each column being a pa.Array
167
+ columns: dict[str, pa.Array] = {}
168
+ for name in df.column_names():
169
+ if not isinstance(name, str):
170
+ raise ValueError(f"Column {name} is not a string")
171
+ if name in columns:
172
+ raise ValueError(f"Column {name} is not unique")
173
+ col = df.get_column_by_name(name)
174
+ dtype = col.dtype[0]
175
+ if dtype in (
176
+ DtypeKind.INT,
177
+ DtypeKind.UINT,
178
+ DtypeKind.FLOAT,
179
+ DtypeKind.STRING,
180
+ DtypeKind.DATETIME,
181
+ ):
182
+ columns[name] = column_to_array(col, allow_copy)
183
+ elif dtype == DtypeKind.BOOL:
184
+ columns[name] = bool_column_to_array(col, allow_copy)
185
+ elif dtype == DtypeKind.CATEGORICAL:
186
+ columns[name] = categorical_column_to_dictionary(col, allow_copy)
187
+ else:
188
+ raise NotImplementedError(f"Data type {dtype} not handled yet")
189
+
190
+ return pa.RecordBatch.from_pydict(columns)
191
+
192
+
193
+ def column_to_array(
194
+ col: ColumnObject,
195
+ allow_copy: bool = True,
196
+ ) -> pa.Array:
197
+ """
198
+ Convert a column holding one of the primitive dtypes to a PyArrow array.
199
+ A primitive type is one of: int, uint, float, bool (1 bit).
200
+
201
+ Parameters
202
+ ----------
203
+ col : ColumnObject
204
+ allow_copy : bool, default: True
205
+ Whether to allow copying the memory to perform the conversion
206
+ (if false then zero-copy approach is requested).
207
+
208
+ Returns
209
+ -------
210
+ pa.Array
211
+ """
212
+ buffers = col.get_buffers()
213
+ data_type = col.dtype
214
+ data = buffers_to_array(buffers, data_type,
215
+ col.size(),
216
+ col.describe_null,
217
+ col.offset,
218
+ allow_copy)
219
+ return data
220
+
221
+
222
+ def bool_column_to_array(
223
+ col: ColumnObject,
224
+ allow_copy: bool = True,
225
+ ) -> pa.Array:
226
+ """
227
+ Convert a column holding boolean dtype to a PyArrow array.
228
+
229
+ Parameters
230
+ ----------
231
+ col : ColumnObject
232
+ allow_copy : bool, default: True
233
+ Whether to allow copying the memory to perform the conversion
234
+ (if false then zero-copy approach is requested).
235
+
236
+ Returns
237
+ -------
238
+ pa.Array
239
+ """
240
+ buffers = col.get_buffers()
241
+ size = buffers["data"][1][1]
242
+
243
+ # If booleans are byte-packed a copy to bit-packed will be made
244
+ if size == 8 and not allow_copy:
245
+ raise RuntimeError(
246
+ "Boolean column will be casted from uint8 and a copy "
247
+ "is required which is forbidden by allow_copy=False"
248
+ )
249
+
250
+ data_type = col.dtype
251
+ data = buffers_to_array(buffers, data_type,
252
+ col.size(),
253
+ col.describe_null,
254
+ col.offset)
255
+ if size == 8:
256
+ data = pc.cast(data, pa.bool_())
257
+
258
+ return data
259
+
260
+
261
+ def categorical_column_to_dictionary(
262
+ col: ColumnObject,
263
+ allow_copy: bool = True,
264
+ ) -> pa.DictionaryArray:
265
+ """
266
+ Convert a column holding categorical data to a pa.DictionaryArray.
267
+
268
+ Parameters
269
+ ----------
270
+ col : ColumnObject
271
+ allow_copy : bool, default: True
272
+ Whether to allow copying the memory to perform the conversion
273
+ (if false then zero-copy approach is requested).
274
+
275
+ Returns
276
+ -------
277
+ pa.DictionaryArray
278
+ """
279
+ if not allow_copy:
280
+ raise RuntimeError(
281
+ "Categorical column will be casted from uint8 and a copy "
282
+ "is required which is forbidden by allow_copy=False"
283
+ )
284
+
285
+ categorical = col.describe_categorical
286
+
287
+ if not categorical["is_dictionary"]:
288
+ raise NotImplementedError(
289
+ "Non-dictionary categoricals not supported yet")
290
+
291
+ # We need to first convert the dictionary column
292
+ cat_column = categorical["categories"]
293
+ dictionary = column_to_array(cat_column)
294
+ # Then we need to convert the indices
295
+ # Here we need to use the buffer data type!
296
+ buffers = col.get_buffers()
297
+ _, data_type = buffers["data"]
298
+ indices = buffers_to_array(buffers, data_type,
299
+ col.size(),
300
+ col.describe_null,
301
+ col.offset)
302
+
303
+ # Constructing a pa.DictionaryArray
304
+ dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
305
+
306
+ return dict_array
307
+
308
+
309
+ def parse_datetime_format_str(format_str):
310
+ """Parse datetime `format_str` to interpret the `data`."""
311
+
312
+ # timestamp 'ts{unit}:tz'
313
+ timestamp_meta = re.match(r"ts([smun]):(.*)", format_str)
314
+ if timestamp_meta:
315
+ unit, tz = timestamp_meta.group(1), timestamp_meta.group(2)
316
+ if unit != "s":
317
+ # the format string describes only a first letter of the unit, so
318
+ # add one extra letter to convert the unit to numpy-style:
319
+ # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns'
320
+ unit += "s"
321
+
322
+ return unit, tz
323
+
324
+ raise NotImplementedError(f"DateTime kind is not supported: {format_str}")
325
+
326
+
327
+ def map_date_type(data_type):
328
+ """Map column date type to pyarrow date type. """
329
+ kind, bit_width, f_string, _ = data_type
330
+
331
+ if kind == DtypeKind.DATETIME:
332
+ unit, tz = parse_datetime_format_str(f_string)
333
+ return pa.timestamp(unit, tz=tz)
334
+ else:
335
+ pa_dtype = _PYARROW_DTYPES.get(kind, {}).get(bit_width, None)
336
+
337
+ # Error if dtype is not supported
338
+ if pa_dtype:
339
+ return pa_dtype
340
+ else:
341
+ raise NotImplementedError(
342
+ f"Conversion for {data_type} is not yet supported.")
343
+
344
+
345
+ def buffers_to_array(
346
+ buffers: ColumnBuffers,
347
+ data_type: Tuple[DtypeKind, int, str, str],
348
+ length: int,
349
+ describe_null: ColumnNullType,
350
+ offset: int = 0,
351
+ allow_copy: bool = True,
352
+ ) -> pa.Array:
353
+ """
354
+ Build a PyArrow array from the passed buffer.
355
+
356
+ Parameters
357
+ ----------
358
+ buffer : ColumnBuffers
359
+ Dictionary containing tuples of underlying buffers and
360
+ their associated dtype.
361
+ data_type : Tuple[DtypeKind, int, str, str],
362
+ Dtype description of the column as a tuple ``(kind, bit-width, format string,
363
+ endianness)``.
364
+ length : int
365
+ The number of values in the array.
366
+ describe_null: ColumnNullType
367
+ Null representation the column dtype uses,
368
+ as a tuple ``(kind, value)``
369
+ offset : int, default: 0
370
+ Number of elements to offset from the start of the buffer.
371
+ allow_copy : bool, default: True
372
+ Whether to allow copying the memory to perform the conversion
373
+ (if false then zero-copy approach is requested).
374
+
375
+ Returns
376
+ -------
377
+ pa.Array
378
+
379
+ Notes
380
+ -----
381
+ The returned array doesn't own the memory. The caller of this function
382
+ is responsible for keeping the memory owner object alive as long as
383
+ the returned PyArrow array is being used.
384
+ """
385
+ data_buff, _ = buffers["data"]
386
+ try:
387
+ validity_buff, validity_dtype = buffers["validity"]
388
+ except TypeError:
389
+ validity_buff = None
390
+ try:
391
+ offset_buff, offset_dtype = buffers["offsets"]
392
+ except TypeError:
393
+ offset_buff = None
394
+
395
+ # Construct a pyarrow Buffer
396
+ data_pa_buffer = pa.foreign_buffer(data_buff.ptr, data_buff.bufsize,
397
+ base=data_buff)
398
+
399
+ # Construct a validity pyarrow Buffer, if applicable
400
+ if validity_buff:
401
+ validity_pa_buff = validity_buffer_from_mask(validity_buff,
402
+ validity_dtype,
403
+ describe_null,
404
+ length,
405
+ offset,
406
+ allow_copy)
407
+ else:
408
+ validity_pa_buff = validity_buffer_nan_sentinel(data_pa_buffer,
409
+ data_type,
410
+ describe_null,
411
+ length,
412
+ offset,
413
+ allow_copy)
414
+
415
+ # Construct a pyarrow Array from buffers
416
+ data_dtype = map_date_type(data_type)
417
+
418
+ if offset_buff:
419
+ _, offset_bit_width, _, _ = offset_dtype
420
+ # If an offset buffer exists, construct an offset pyarrow Buffer
421
+ # and add it to the construction of an array
422
+ offset_pa_buffer = pa.foreign_buffer(offset_buff.ptr,
423
+ offset_buff.bufsize,
424
+ base=offset_buff)
425
+
426
+ if data_type[2] == 'U':
427
+ string_type = pa.large_string()
428
+ else:
429
+ if offset_bit_width == 64:
430
+ string_type = pa.large_string()
431
+ else:
432
+ string_type = pa.string()
433
+ array = pa.Array.from_buffers(
434
+ string_type,
435
+ length,
436
+ [validity_pa_buff, offset_pa_buffer, data_pa_buffer],
437
+ offset=offset,
438
+ )
439
+ else:
440
+ array = pa.Array.from_buffers(
441
+ data_dtype,
442
+ length,
443
+ [validity_pa_buff, data_pa_buffer],
444
+ offset=offset,
445
+ )
446
+
447
+ return array
448
+
449
+
450
+ def validity_buffer_from_mask(
451
+ validity_buff: BufferObject,
452
+ validity_dtype: Dtype,
453
+ describe_null: ColumnNullType,
454
+ length: int,
455
+ offset: int = 0,
456
+ allow_copy: bool = True,
457
+ ) -> pa.Buffer:
458
+ """
459
+ Build a PyArrow buffer from the passed mask buffer.
460
+
461
+ Parameters
462
+ ----------
463
+ validity_buff : BufferObject
464
+ Tuple of underlying validity buffer and associated dtype.
465
+ validity_dtype : Dtype
466
+ Dtype description as a tuple ``(kind, bit-width, format string,
467
+ endianness)``.
468
+ describe_null : ColumnNullType
469
+ Null representation the column dtype uses,
470
+ as a tuple ``(kind, value)``
471
+ length : int
472
+ The number of values in the array.
473
+ offset : int, default: 0
474
+ Number of elements to offset from the start of the buffer.
475
+ allow_copy : bool, default: True
476
+ Whether to allow copying the memory to perform the conversion
477
+ (if false then zero-copy approach is requested).
478
+
479
+ Returns
480
+ -------
481
+ pa.Buffer
482
+ """
483
+ null_kind, sentinel_val = describe_null
484
+ validity_kind, _, _, _ = validity_dtype
485
+ assert validity_kind == DtypeKind.BOOL
486
+
487
+ if null_kind == ColumnNullType.NON_NULLABLE:
488
+ # Sliced array can have a NON_NULLABLE ColumnNullType due
489
+ # to no missing values in that slice of an array though the bitmask
490
+ # exists and validity_buff must be set to None in this case
491
+ return None
492
+
493
+ elif null_kind == ColumnNullType.USE_BYTEMASK or (
494
+ null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 1
495
+ ):
496
+ buff = pa.foreign_buffer(validity_buff.ptr,
497
+ validity_buff.bufsize,
498
+ base=validity_buff)
499
+
500
+ if null_kind == ColumnNullType.USE_BYTEMASK:
501
+ if not allow_copy:
502
+ raise RuntimeError(
503
+ "To create a bitmask a copy of the data is "
504
+ "required which is forbidden by allow_copy=False"
505
+ )
506
+ mask = pa.Array.from_buffers(pa.int8(), length,
507
+ [None, buff],
508
+ offset=offset)
509
+ mask_bool = pc.cast(mask, pa.bool_())
510
+ else:
511
+ mask_bool = pa.Array.from_buffers(pa.bool_(), length,
512
+ [None, buff],
513
+ offset=offset)
514
+
515
+ if sentinel_val == 1:
516
+ mask_bool = pc.invert(mask_bool)
517
+
518
+ return mask_bool.buffers()[1]
519
+
520
+ elif null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 0:
521
+ return pa.foreign_buffer(validity_buff.ptr,
522
+ validity_buff.bufsize,
523
+ base=validity_buff)
524
+ else:
525
+ raise NotImplementedError(
526
+ f"{describe_null} null representation is not yet supported.")
527
+
528
+
529
+ def validity_buffer_nan_sentinel(
530
+ data_pa_buffer: BufferObject,
531
+ data_type: Dtype,
532
+ describe_null: ColumnNullType,
533
+ length: int,
534
+ offset: int = 0,
535
+ allow_copy: bool = True,
536
+ ) -> pa.Buffer:
537
+ """
538
+ Build a PyArrow buffer from NaN or sentinel values.
539
+
540
+ Parameters
541
+ ----------
542
+ data_pa_buffer : pa.Buffer
543
+ PyArrow buffer for the column data.
544
+ data_type : Dtype
545
+ Dtype description as a tuple ``(kind, bit-width, format string,
546
+ endianness)``.
547
+ describe_null : ColumnNullType
548
+ Null representation the column dtype uses,
549
+ as a tuple ``(kind, value)``
550
+ length : int
551
+ The number of values in the array.
552
+ offset : int, default: 0
553
+ Number of elements to offset from the start of the buffer.
554
+ allow_copy : bool, default: True
555
+ Whether to allow copying the memory to perform the conversion
556
+ (if false then zero-copy approach is requested).
557
+
558
+ Returns
559
+ -------
560
+ pa.Buffer
561
+ """
562
+ kind, bit_width, _, _ = data_type
563
+ data_dtype = map_date_type(data_type)
564
+ null_kind, sentinel_val = describe_null
565
+
566
+ # Check for float NaN values
567
+ if null_kind == ColumnNullType.USE_NAN:
568
+ if not allow_copy:
569
+ raise RuntimeError(
570
+ "To create a bitmask a copy of the data is "
571
+ "required which is forbidden by allow_copy=False"
572
+ )
573
+
574
+ if kind == DtypeKind.FLOAT and bit_width == 16:
575
+ # 'pyarrow.compute.is_nan' kernel not yet implemented
576
+ # for float16
577
+ raise NotImplementedError(
578
+ f"{data_type} with {null_kind} is not yet supported.")
579
+ else:
580
+ pyarrow_data = pa.Array.from_buffers(
581
+ data_dtype,
582
+ length,
583
+ [None, data_pa_buffer],
584
+ offset=offset,
585
+ )
586
+ mask = pc.is_nan(pyarrow_data)
587
+ mask = pc.invert(mask)
588
+ return mask.buffers()[1]
589
+
590
+ # Check for sentinel values
591
+ elif null_kind == ColumnNullType.USE_SENTINEL:
592
+ if not allow_copy:
593
+ raise RuntimeError(
594
+ "To create a bitmask a copy of the data is "
595
+ "required which is forbidden by allow_copy=False"
596
+ )
597
+
598
+ if kind == DtypeKind.DATETIME:
599
+ sentinel_dtype = pa.int64()
600
+ else:
601
+ sentinel_dtype = data_dtype
602
+ pyarrow_data = pa.Array.from_buffers(sentinel_dtype,
603
+ length,
604
+ [None, data_pa_buffer],
605
+ offset=offset)
606
+ sentinel_arr = pc.equal(pyarrow_data, sentinel_val)
607
+ mask_bool = pc.invert(sentinel_arr)
608
+ return mask_bool.buffers()[1]
609
+
610
+ elif null_kind == ColumnNullType.NON_NULLABLE:
611
+ pass
612
+ else:
613
+ raise NotImplementedError(
614
+ f"{describe_null} null representation is not yet supported.")
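A hedged round-trip sketch for the converter above: it accepts any object exposing __dataframe__; pandas is used only as an example and must be installed for this to run:

import pandas as pd
import pyarrow.interchange as pi

df = pd.DataFrame({"x": [1.0, 2.0, None]})

table = pi.from_dataframe(df)   # copies are made where required
print(table)

# Requesting zero-copy may fail, e.g. when a validity bitmask has to be
# computed from NaN values (see validity_buffer_nan_sentinel above).
try:
    pi.from_dataframe(df, allow_copy=False)
except RuntimeError as exc:
    print("zero-copy not possible:", exc)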
llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__init__.py ADDED
@@ -0,0 +1,20 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # flake8: noqa
+
+ from .core import *
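The parquet package exposes its API through the star import from core above; a minimal, hypothetical usage sketch (the file name and column are illustrative):

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"n_legs": [2, 4, 8]})
pq.write_table(table, "example.parquet")   # writer implemented in parquet/core.py
print(pq.read_table("example.parquet"))    # reads back into a pyarrow.Table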
llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (209 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc ADDED
Binary file (73.8 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc ADDED
Binary file (373 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/core.py ADDED
@@ -0,0 +1,2341 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ from collections import defaultdict
20
+ from contextlib import nullcontext
21
+ from functools import reduce
22
+
23
+ import inspect
24
+ import json
25
+ import os
26
+ import re
27
+ import operator
28
+ import warnings
29
+
30
+ import pyarrow as pa
31
+
32
+ try:
33
+ import pyarrow._parquet as _parquet
34
+ except ImportError as exc:
35
+ raise ImportError(
36
+ "The pyarrow installation is not built with support "
37
+ f"for the Parquet file format ({str(exc)})"
38
+ ) from None
39
+
40
+ from pyarrow._parquet import (ParquetReader, Statistics, # noqa
41
+ FileMetaData, RowGroupMetaData,
42
+ ColumnChunkMetaData,
43
+ ParquetSchema, ColumnSchema,
44
+ ParquetLogicalType,
45
+ FileEncryptionProperties,
46
+ FileDecryptionProperties,
47
+ SortingColumn)
48
+ from pyarrow.fs import (LocalFileSystem, FileSystem, FileType,
49
+ _resolve_filesystem_and_path, _ensure_filesystem)
50
+ from pyarrow.util import guid, _is_path_like, _stringify_path, _deprecate_api
51
+
52
+
53
+ def _check_contains_null(val):
54
+ if isinstance(val, bytes):
55
+ for byte in val:
56
+ if isinstance(byte, bytes):
57
+ compare_to = chr(0)
58
+ else:
59
+ compare_to = 0
60
+ if byte == compare_to:
61
+ return True
62
+ elif isinstance(val, str):
63
+ return '\x00' in val
64
+ return False
65
+
66
+
67
+ def _check_filters(filters, check_null_strings=True):
68
+ """
69
+ Check if filters are well-formed.
70
+ """
71
+ if filters is not None:
72
+ if len(filters) == 0 or any(len(f) == 0 for f in filters):
73
+ raise ValueError("Malformed filters")
74
+ if isinstance(filters[0][0], str):
75
+ # We have encountered the situation where we have one nesting level
76
+ # too few:
77
+ # We have [(,,), ..] instead of [[(,,), ..]]
78
+ filters = [filters]
79
+ if check_null_strings:
80
+ for conjunction in filters:
81
+ for col, op, val in conjunction:
82
+ if (
83
+ isinstance(val, list) and
84
+ all(_check_contains_null(v) for v in val) or
85
+ _check_contains_null(val)
86
+ ):
87
+ raise NotImplementedError(
88
+ "Null-terminated binary strings are not supported "
89
+ "as filter values."
90
+ )
91
+ return filters
92
+
93
+
94
+ _DNF_filter_doc = """Predicates are expressed using an ``Expression`` or using
95
+ the disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
96
+ DNF allows arbitrary boolean logical combinations of single column predicates.
97
+ The innermost tuples each describe a single column predicate. The list of inner
98
+ predicates is interpreted as a conjunction (AND), forming a more selective,
99
+ multiple-column predicate. Finally, the outermost list combines these filters
100
+ as a disjunction (OR).
101
+
102
+ Predicates may also be passed as List[Tuple]. This form is interpreted
103
+ as a single conjunction. To express OR in predicates, one must
104
+ use the (preferred) List[List[Tuple]] notation.
105
+
106
+ Each tuple has format: (``key``, ``op``, ``value``) and compares the
107
+ ``key`` with the ``value``.
108
+ The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
109
+ ``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
110
+ ``value`` must be a collection such as a ``list``, a ``set`` or a
111
+ ``tuple``.
112
+
113
+ Examples:
114
+
115
+ Using the ``Expression`` API:
116
+
117
+ .. code-block:: python
118
+
119
+ import pyarrow.compute as pc
120
+ pc.field('x') == 0
121
+ pc.field('y').isin(['a', 'b', 'c'])
122
+ ~pc.field('y').isin({'a', 'b'})
123
+
124
+ Using the DNF format:
125
+
126
+ .. code-block:: python
127
+
128
+ ('x', '=', 0)
129
+ ('y', 'in', ['a', 'b', 'c'])
130
+ ('z', 'not in', {'a','b'})
131
+
132
+ """
133
+
134
+
135
+ def filters_to_expression(filters):
136
+ """
137
+ Check if filters are well-formed and convert to an ``Expression``.
138
+
139
+ Parameters
140
+ ----------
141
+ filters : List[Tuple] or List[List[Tuple]]
142
+
143
+ Notes
144
+ -----
145
+ See internal ``pyarrow._DNF_filter_doc`` attribute for more details.
146
+
147
+ Examples
148
+ --------
149
+
150
+ >>> filters_to_expression([('foo', '==', 'bar')])
151
+ <pyarrow.compute.Expression (foo == "bar")>
152
+
153
+ Returns
154
+ -------
155
+ pyarrow.compute.Expression
156
+ An Expression representing the filters
157
+ """
158
+ import pyarrow.dataset as ds
159
+
160
+ if isinstance(filters, ds.Expression):
161
+ return filters
162
+
163
+ filters = _check_filters(filters, check_null_strings=False)
164
+
165
+ def convert_single_predicate(col, op, val):
166
+ field = ds.field(col)
167
+
168
+ if op == "=" or op == "==":
169
+ return field == val
170
+ elif op == "!=":
171
+ return field != val
172
+ elif op == '<':
173
+ return field < val
174
+ elif op == '>':
175
+ return field > val
176
+ elif op == '<=':
177
+ return field <= val
178
+ elif op == '>=':
179
+ return field >= val
180
+ elif op == 'in':
181
+ return field.isin(val)
182
+ elif op == 'not in':
183
+ return ~field.isin(val)
184
+ else:
185
+ raise ValueError(
186
+ '"{0}" is not a valid operator in predicates.'.format(
187
+ (col, op, val)))
188
+
189
+ disjunction_members = []
190
+
191
+ for conjunction in filters:
192
+ conjunction_members = [
193
+ convert_single_predicate(col, op, val)
194
+ for col, op, val in conjunction
195
+ ]
196
+
197
+ disjunction_members.append(reduce(operator.and_, conjunction_members))
198
+
199
+ return reduce(operator.or_, disjunction_members)
200
+
201
+
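A hedged usage sketch (not part of the source) of the DNF form accepted by filters_to_expression: the nested list below reads as (x == 0 AND y IN {'a', 'b'}) OR (z < 5); the column names and values are illustrative only.

    import pyarrow.parquet as pq

    expr = pq.filters_to_expression(
        [[('x', '=', 0), ('y', 'in', ['a', 'b'])],
         [('z', '<', 5)]]
    )
    print(expr)   # a pyarrow.compute.Expression combining the predicates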
202
+ _filters_to_expression = _deprecate_api(
203
+ "_filters_to_expression", "filters_to_expression",
204
+ filters_to_expression, "10.0.0", DeprecationWarning)
205
+
206
+
207
+ # ----------------------------------------------------------------------
208
+ # Reading a single Parquet file
209
+
210
+
211
+ class ParquetFile:
212
+ """
213
+ Reader interface for a single Parquet file.
214
+
215
+ Parameters
216
+ ----------
217
+ source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
218
+ Readable source. To pass bytes or a buffer-like object containing a
219
+ Parquet file, use pyarrow.BufferReader.
220
+ metadata : FileMetaData, default None
221
+ Use existing metadata object, rather than reading from file.
222
+ common_metadata : FileMetaData, default None
223
+ Will be used in reads for pandas schema metadata if not found in the
224
+ main file's metadata, no other uses at the moment.
225
+ read_dictionary : list
226
+ List of column names to read directly as DictionaryArray.
227
+ memory_map : bool, default False
228
+ If the source is a file path, use a memory map to read file, which can
229
+ improve performance in some environments.
230
+ buffer_size : int, default 0
231
+ If positive, perform read buffering when deserializing individual
232
+ column chunks. Otherwise IO calls are unbuffered.
233
+ pre_buffer : bool, default False
234
+ Coalesce and issue file reads in parallel to improve performance on
235
+ high-latency filesystems (e.g. S3). If True, Arrow will use a
236
+ background I/O thread pool.
237
+ coerce_int96_timestamp_unit : str, default None
238
+ Cast timestamps that are stored in INT96 format to a particular
239
+ resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
240
+ and therefore INT96 timestamps will be inferred as timestamps
241
+ in nanoseconds.
242
+ decryption_properties : FileDecryptionProperties, default None
243
+ File decryption properties for Parquet Modular Encryption.
244
+ thrift_string_size_limit : int, default None
245
+ If not None, override the maximum total string size allocated
246
+ when decoding Thrift structures. The default limit should be
247
+ sufficient for most Parquet files.
248
+ thrift_container_size_limit : int, default None
249
+ If not None, override the maximum total size of containers allocated
250
+ when decoding Thrift structures. The default limit should be
251
+ sufficient for most Parquet files.
252
+ filesystem : FileSystem, default None
253
+ If nothing passed, will be inferred based on path.
254
+ The path will first be looked up in the local on-disk filesystem; otherwise
255
+ it will be parsed as a URI to determine the filesystem.
256
+ page_checksum_verification : bool, default False
257
+ If True, verify the checksum for each page read from the file.
258
+
259
+ Examples
260
+ --------
261
+
262
+ Generate an example PyArrow Table and write it to Parquet file:
263
+
264
+ >>> import pyarrow as pa
265
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
266
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
267
+ ... "Brittle stars", "Centipede"]})
268
+
269
+ >>> import pyarrow.parquet as pq
270
+ >>> pq.write_table(table, 'example.parquet')
271
+
272
+ Create a ``ParquetFile`` object from the Parquet file:
273
+
274
+ >>> parquet_file = pq.ParquetFile('example.parquet')
275
+
276
+ Read the data:
277
+
278
+ >>> parquet_file.read()
279
+ pyarrow.Table
280
+ n_legs: int64
281
+ animal: string
282
+ ----
283
+ n_legs: [[2,2,4,4,5,100]]
284
+ animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]]
285
+
286
+ Create a ParquetFile object with "animal" column as DictionaryArray:
287
+
288
+ >>> parquet_file = pq.ParquetFile('example.parquet',
289
+ ... read_dictionary=["animal"])
290
+ >>> parquet_file.read()
291
+ pyarrow.Table
292
+ n_legs: int64
293
+ animal: dictionary<values=string, indices=int32, ordered=0>
294
+ ----
295
+ n_legs: [[2,2,4,4,5,100]]
296
+ animal: [ -- dictionary:
297
+ ["Flamingo","Parrot",...,"Brittle stars","Centipede"] -- indices:
298
+ [0,1,2,3,4,5]]
299
+ """
300
+
301
+ def __init__(self, source, *, metadata=None, common_metadata=None,
302
+ read_dictionary=None, memory_map=False, buffer_size=0,
303
+ pre_buffer=False, coerce_int96_timestamp_unit=None,
304
+ decryption_properties=None, thrift_string_size_limit=None,
305
+ thrift_container_size_limit=None, filesystem=None,
306
+ page_checksum_verification=False):
307
+
308
+ self._close_source = getattr(source, 'closed', True)
309
+
310
+ filesystem, source = _resolve_filesystem_and_path(
311
+ source, filesystem, memory_map=memory_map)
312
+ if filesystem is not None:
313
+ source = filesystem.open_input_file(source)
314
+ self._close_source = True # We opened it here, ensure we close it.
315
+
316
+ self.reader = ParquetReader()
317
+ self.reader.open(
318
+ source, use_memory_map=memory_map,
319
+ buffer_size=buffer_size, pre_buffer=pre_buffer,
320
+ read_dictionary=read_dictionary, metadata=metadata,
321
+ coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
322
+ decryption_properties=decryption_properties,
323
+ thrift_string_size_limit=thrift_string_size_limit,
324
+ thrift_container_size_limit=thrift_container_size_limit,
325
+ page_checksum_verification=page_checksum_verification,
326
+ )
327
+ self.common_metadata = common_metadata
328
+ self._nested_paths_by_prefix = self._build_nested_paths()
329
+
330
+ def __enter__(self):
331
+ return self
332
+
333
+ def __exit__(self, *args, **kwargs):
334
+ self.close()
335
+
336
+ def _build_nested_paths(self):
337
+ paths = self.reader.column_paths
338
+
339
+ result = defaultdict(list)
340
+
341
+ for i, path in enumerate(paths):
342
+ key = path[0]
343
+ rest = path[1:]
344
+ while True:
345
+ result[key].append(i)
346
+
347
+ if not rest:
348
+ break
349
+
350
+ key = '.'.join((key, rest[0]))
351
+ rest = rest[1:]
352
+
353
+ return result
354
+
355
+ @property
356
+ def metadata(self):
357
+ """
358
+ Return the Parquet metadata.
359
+ """
360
+ return self.reader.metadata
361
+
362
+ @property
363
+ def schema(self):
364
+ """
365
+ Return the Parquet schema, unconverted to Arrow types
366
+ """
367
+ return self.metadata.schema
368
+
369
+ @property
370
+ def schema_arrow(self):
371
+ """
372
+ Return the inferred Arrow schema, converted from the whole Parquet
373
+ file's schema
374
+
375
+ Examples
376
+ --------
377
+ Generate an example Parquet file:
378
+
379
+ >>> import pyarrow as pa
380
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
381
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
382
+ ... "Brittle stars", "Centipede"]})
383
+ >>> import pyarrow.parquet as pq
384
+ >>> pq.write_table(table, 'example.parquet')
385
+ >>> parquet_file = pq.ParquetFile('example.parquet')
386
+
387
+ Read the Arrow schema:
388
+
389
+ >>> parquet_file.schema_arrow
390
+ n_legs: int64
391
+ animal: string
392
+ """
393
+ return self.reader.schema_arrow
394
+
395
+ @property
396
+ def num_row_groups(self):
397
+ """
398
+ Return the number of row groups of the Parquet file.
399
+
400
+ Examples
401
+ --------
402
+ >>> import pyarrow as pa
403
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
404
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
405
+ ... "Brittle stars", "Centipede"]})
406
+ >>> import pyarrow.parquet as pq
407
+ >>> pq.write_table(table, 'example.parquet')
408
+ >>> parquet_file = pq.ParquetFile('example.parquet')
409
+
410
+ >>> parquet_file.num_row_groups
411
+ 1
412
+ """
413
+ return self.reader.num_row_groups
414
+
415
+ def close(self, force: bool = False):
416
+ if self._close_source or force:
417
+ self.reader.close()
418
+
419
+ @property
420
+ def closed(self) -> bool:
421
+ return self.reader.closed
422
+
423
+ def read_row_group(self, i, columns=None, use_threads=True,
424
+ use_pandas_metadata=False):
425
+ """
426
+ Read a single row group from a Parquet file.
427
+
428
+ Parameters
429
+ ----------
430
+ i : int
431
+ Index of the individual row group that we want to read.
432
+ columns : list
433
+ If not None, only these columns will be read from the row group. A
434
+ column name may be a prefix of a nested field, e.g. 'a' will select
435
+ 'a.b', 'a.c', and 'a.d.e'.
436
+ use_threads : bool, default True
437
+ Perform multi-threaded column reads.
438
+ use_pandas_metadata : bool, default False
439
+ If True and file has custom pandas schema metadata, ensure that
440
+ index columns are also loaded.
441
+
442
+ Returns
443
+ -------
444
+ pyarrow.table.Table
445
+ Content of the row group as a table (of columns)
446
+
447
+ Examples
448
+ --------
449
+ >>> import pyarrow as pa
450
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
451
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
452
+ ... "Brittle stars", "Centipede"]})
453
+ >>> import pyarrow.parquet as pq
454
+ >>> pq.write_table(table, 'example.parquet')
455
+ >>> parquet_file = pq.ParquetFile('example.parquet')
456
+
457
+ >>> parquet_file.read_row_group(0)
458
+ pyarrow.Table
459
+ n_legs: int64
460
+ animal: string
461
+ ----
462
+ n_legs: [[2,2,4,4,5,100]]
463
+ animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
464
+ """
465
+ column_indices = self._get_column_indices(
466
+ columns, use_pandas_metadata=use_pandas_metadata)
467
+ return self.reader.read_row_group(i, column_indices=column_indices,
468
+ use_threads=use_threads)
469
+
470
+ def read_row_groups(self, row_groups, columns=None, use_threads=True,
471
+ use_pandas_metadata=False):
472
+ """
473
+ Read multiple row groups from a Parquet file.
474
+
475
+ Parameters
476
+ ----------
477
+ row_groups : list
478
+ Only these row groups will be read from the file.
479
+ columns : list
480
+ If not None, only these columns will be read from the row group. A
481
+ column name may be a prefix of a nested field, e.g. 'a' will select
482
+ 'a.b', 'a.c', and 'a.d.e'.
483
+ use_threads : bool, default True
484
+ Perform multi-threaded column reads.
485
+ use_pandas_metadata : bool, default False
486
+ If True and file has custom pandas schema metadata, ensure that
487
+ index columns are also loaded.
488
+
489
+ Returns
490
+ -------
491
+ pyarrow.table.Table
492
+ Content of the row groups as a table (of columns).
493
+
494
+ Examples
495
+ --------
496
+ >>> import pyarrow as pa
497
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
498
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
499
+ ... "Brittle stars", "Centipede"]})
500
+ >>> import pyarrow.parquet as pq
501
+ >>> pq.write_table(table, 'example.parquet')
502
+ >>> parquet_file = pq.ParquetFile('example.parquet')
503
+
504
+ >>> parquet_file.read_row_groups([0,0])
505
+ pyarrow.Table
506
+ n_legs: int64
507
+ animal: string
508
+ ----
509
+ n_legs: [[2,2,4,4,5,...,2,4,4,5,100]]
510
+ animal: [["Flamingo","Parrot","Dog",...,"Brittle stars","Centipede"]]
511
+ """
512
+ column_indices = self._get_column_indices(
513
+ columns, use_pandas_metadata=use_pandas_metadata)
514
+ return self.reader.read_row_groups(row_groups,
515
+ column_indices=column_indices,
516
+ use_threads=use_threads)
517
+
518
+ def iter_batches(self, batch_size=65536, row_groups=None, columns=None,
519
+ use_threads=True, use_pandas_metadata=False):
520
+ """
521
+ Read streaming batches from a Parquet file.
522
+
523
+ Parameters
524
+ ----------
525
+ batch_size : int, default 64K
526
+ Maximum number of records to yield per batch. Batches may be
527
+ smaller if there aren't enough rows in the file.
528
+ row_groups : list
529
+ Only these row groups will be read from the file.
530
+ columns : list
531
+ If not None, only these columns will be read from the file. A
532
+ column name may be a prefix of a nested field, e.g. 'a' will select
533
+ 'a.b', 'a.c', and 'a.d.e'.
534
+ use_threads : boolean, default True
535
+ Perform multi-threaded column reads.
536
+ use_pandas_metadata : boolean, default False
537
+ If True and file has custom pandas schema metadata, ensure that
538
+ index columns are also loaded.
539
+
540
+ Yields
541
+ ------
542
+ pyarrow.RecordBatch
543
+ Contents of each batch as a record batch
544
+
545
+ Examples
546
+ --------
547
+ Generate an example Parquet file:
548
+
549
+ >>> import pyarrow as pa
550
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
551
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
552
+ ... "Brittle stars", "Centipede"]})
553
+ >>> import pyarrow.parquet as pq
554
+ >>> pq.write_table(table, 'example.parquet')
555
+ >>> parquet_file = pq.ParquetFile('example.parquet')
556
+ >>> for i in parquet_file.iter_batches():
557
+ ... print("RecordBatch")
558
+ ... print(i.to_pandas())
559
+ ...
560
+ RecordBatch
561
+ n_legs animal
562
+ 0 2 Flamingo
563
+ 1 2 Parrot
564
+ 2 4 Dog
565
+ 3 4 Horse
566
+ 4 5 Brittle stars
567
+ 5 100 Centipede
568
+ """
569
+ if row_groups is None:
570
+ row_groups = range(0, self.metadata.num_row_groups)
571
+ column_indices = self._get_column_indices(
572
+ columns, use_pandas_metadata=use_pandas_metadata)
573
+
574
+ batches = self.reader.iter_batches(batch_size,
575
+ row_groups=row_groups,
576
+ column_indices=column_indices,
577
+ use_threads=use_threads)
578
+ return batches
579
+
580
+ def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
581
+ """
582
+ Read a Table from Parquet format.
583
+
584
+ Parameters
585
+ ----------
586
+ columns : list
587
+ If not None, only these columns will be read from the file. A
588
+ column name may be a prefix of a nested field, e.g. 'a' will select
589
+ 'a.b', 'a.c', and 'a.d.e'.
590
+ use_threads : bool, default True
591
+ Perform multi-threaded column reads.
592
+ use_pandas_metadata : bool, default False
593
+ If True and file has custom pandas schema metadata, ensure that
594
+ index columns are also loaded.
595
+
596
+ Returns
597
+ -------
598
+ pyarrow.table.Table
599
+ Content of the file as a table (of columns).
600
+
601
+ Examples
602
+ --------
603
+ Generate an example Parquet file:
604
+
605
+ >>> import pyarrow as pa
606
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
607
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
608
+ ... "Brittle stars", "Centipede"]})
609
+ >>> import pyarrow.parquet as pq
610
+ >>> pq.write_table(table, 'example.parquet')
611
+ >>> parquet_file = pq.ParquetFile('example.parquet')
612
+
613
+ Read a Table:
614
+
615
+ >>> parquet_file.read(columns=["animal"])
616
+ pyarrow.Table
617
+ animal: string
618
+ ----
619
+ animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
620
+ """
621
+ column_indices = self._get_column_indices(
622
+ columns, use_pandas_metadata=use_pandas_metadata)
623
+ return self.reader.read_all(column_indices=column_indices,
624
+ use_threads=use_threads)
625
+
626
+ def scan_contents(self, columns=None, batch_size=65536):
627
+ """
628
+ Read contents of file for the given columns and batch size.
629
+
630
+ Notes
631
+ -----
632
+ This function's primary purpose is benchmarking.
633
+ The scan is executed on a single thread.
634
+
635
+ Parameters
636
+ ----------
637
+ columns : list of integers, default None
638
+ Select columns to read, if None scan all columns.
639
+ batch_size : int, default 64K
640
+ Number of rows to read at a time internally.
641
+
642
+ Returns
643
+ -------
644
+ num_rows : int
645
+ Number of rows in file
646
+
647
+ Examples
648
+ --------
649
+ >>> import pyarrow as pa
650
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
651
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
652
+ ... "Brittle stars", "Centipede"]})
653
+ >>> import pyarrow.parquet as pq
654
+ >>> pq.write_table(table, 'example.parquet')
655
+ >>> parquet_file = pq.ParquetFile('example.parquet')
656
+
657
+ >>> parquet_file.scan_contents()
658
+ 6
659
+ """
660
+ column_indices = self._get_column_indices(columns)
661
+ return self.reader.scan_contents(column_indices,
662
+ batch_size=batch_size)
663
+
664
+ def _get_column_indices(self, column_names, use_pandas_metadata=False):
665
+ if column_names is None:
666
+ return None
667
+
668
+ indices = []
669
+
670
+ for name in column_names:
671
+ if name in self._nested_paths_by_prefix:
672
+ indices.extend(self._nested_paths_by_prefix[name])
673
+
674
+ if use_pandas_metadata:
675
+ file_keyvalues = self.metadata.metadata
676
+ common_keyvalues = (self.common_metadata.metadata
677
+ if self.common_metadata is not None
678
+ else None)
679
+
680
+ if file_keyvalues and b'pandas' in file_keyvalues:
681
+ index_columns = _get_pandas_index_columns(file_keyvalues)
682
+ elif common_keyvalues and b'pandas' in common_keyvalues:
683
+ index_columns = _get_pandas_index_columns(common_keyvalues)
684
+ else:
685
+ index_columns = []
686
+
687
+ if indices is not None and index_columns:
688
+ indices += [self.reader.column_name_idx(descr)
689
+ for descr in index_columns
690
+ if not isinstance(descr, dict)]
691
+
692
+ return indices
693
+
694
+
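An illustrative sketch (not part of the file) of how the prefix map built by _build_nested_paths lets a column name such as 'a' select every nested leaf under it, which is what _get_column_indices uses; the paths below are made up for the example.

    from collections import defaultdict

    paths = [('a', 'b'), ('a', 'c'), ('d',)]      # leaf column paths
    by_prefix = defaultdict(list)
    for i, path in enumerate(paths):
        key, rest = path[0], path[1:]
        while True:
            by_prefix[key].append(i)
            if not rest:
                break
            key = '.'.join((key, rest[0]))
            rest = rest[1:]

    print(dict(by_prefix))   # {'a': [0, 1], 'a.b': [0], 'a.c': [1], 'd': [2]}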
695
+ _SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
696
+
697
+
698
+ def _sanitized_spark_field_name(name):
699
+ return _SPARK_DISALLOWED_CHARS.sub('_', name)
700
+
701
+
702
+ def _sanitize_schema(schema, flavor):
703
+ if 'spark' in flavor:
704
+ sanitized_fields = []
705
+
706
+ schema_changed = False
707
+
708
+ for field in schema:
709
+ name = field.name
710
+ sanitized_name = _sanitized_spark_field_name(name)
711
+
712
+ if sanitized_name != name:
713
+ schema_changed = True
714
+ sanitized_field = pa.field(sanitized_name, field.type,
715
+ field.nullable, field.metadata)
716
+ sanitized_fields.append(sanitized_field)
717
+ else:
718
+ sanitized_fields.append(field)
719
+
720
+ new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
721
+ return new_schema, schema_changed
722
+ else:
723
+ return schema, False
724
+
725
+
726
+ def _sanitize_table(table, new_schema, flavor):
727
+ # TODO: This will not handle prohibited characters in nested field names
728
+ if 'spark' in flavor:
729
+ column_data = [table[i] for i in range(table.num_columns)]
730
+ return pa.Table.from_arrays(column_data, schema=new_schema)
731
+ else:
732
+ return table
733
+
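An illustrative sketch (not part of the file) of what the flavor='spark' sanitization above does to a field name containing characters Spark rejects; the column name is made up for the example.

    import re

    _SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
    print(_SPARK_DISALLOWED_CHARS.sub('_', 'total amount (usd)'))
    # prints: total_amount__usd_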
734
+
735
+ _parquet_writer_arg_docs = """version : {"1.0", "2.4", "2.6"}, default "2.6"
736
+ Determine which Parquet logical types are available for use, whether the
737
+ reduced set from the Parquet 1.x.x format or the expanded logical types
738
+ added in later format versions.
739
+ Files written with version='2.4' or '2.6' may not be readable in all
740
+ Parquet implementations, so version='1.0' is likely the choice that
741
+ maximizes file compatibility.
742
+ UINT32 and some logical types are only available with version '2.4'.
743
+ Nanosecond timestamps are only available with version '2.6'.
744
+ Other features such as compression algorithms or the new serialized
745
+ data page format must be enabled separately (see 'compression' and
746
+ 'data_page_version').
747
+ use_dictionary : bool or list, default True
748
+ Specify if we should use dictionary encoding in general or only for
749
+ some columns.
750
+ When encoding the column, if the dictionary size is too large, the
751
+ column will fall back to ``PLAIN`` encoding. In particular, ``BOOLEAN`` type
752
+ doesn't support dictionary encoding.
753
+ compression : str or dict, default 'snappy'
754
+ Specify the compression codec, either on a general basis or per-column.
755
+ Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
756
+ write_statistics : bool or list, default True
757
+ Specify if we should write statistics in general (default is True) or only
758
+ for some columns.
759
+ use_deprecated_int96_timestamps : bool, default None
760
+ Write timestamps to INT96 Parquet format. Defaults to False unless enabled
761
+ by the flavor argument. This takes priority over the coerce_timestamps option.
762
+ coerce_timestamps : str, default None
763
+ Cast timestamps to a particular resolution. If omitted, defaults are chosen
764
+ depending on `version`. For ``version='1.0'``
765
+ and ``version='2.4'``, nanoseconds are cast to microseconds ('us'), while
766
+ for other `version` values, they are written natively without loss
767
+ of resolution. Seconds are always cast to milliseconds ('ms') by default,
768
+ as Parquet does not have any temporal type with seconds resolution.
769
+ If the casting results in loss of data, it will raise an exception
770
+ unless ``allow_truncated_timestamps=True`` is given.
771
+ Valid values: {None, 'ms', 'us'}
772
+ allow_truncated_timestamps : bool, default False
773
+ Allow loss of data when coercing timestamps to a particular
774
+ resolution. E.g. if microsecond or nanosecond data is lost when coercing to
775
+ 'ms', do not raise an exception. Passing ``allow_truncated_timestamps=True``
776
+ will NOT result in the truncation exception being ignored unless
777
+ ``coerce_timestamps`` is not None.
778
+ data_page_size : int, default None
779
+ Set a target threshold for the approximate encoded size of data
780
+ pages within a column chunk (in bytes). If None, use the default data page
781
+ size of 1MByte.
782
+ flavor : {'spark'}, default None
783
+ Sanitize schema or set other compatibility options to work with
784
+ various target systems.
785
+ filesystem : FileSystem, default None
786
+ If nothing passed, will be inferred from `where` if path-like, else
787
+ `where` is already a file-like object so no filesystem is needed.
788
+ compression_level : int or dict, default None
789
+ Specify the compression level for a codec, either on a general basis or
790
+ per-column. If None is passed, arrow selects the compression level for
791
+ the compression codec in use. The compression level has a different
792
+ meaning for each codec, so you have to read the documentation of the
793
+ codec you are using.
794
+ An exception is thrown if the compression codec does not allow specifying
795
+ a compression level.
796
+ use_byte_stream_split : bool or list, default False
797
+ Specify if the byte_stream_split encoding should be used in general or
798
+ only for some columns. If both dictionary and byte_stream_split are
799
+ enabled, then dictionary is preferred.
800
+ The byte_stream_split encoding is valid only for floating-point data types
801
+ and should be combined with a compression codec.
802
+ column_encoding : string or dict, default None
803
+ Specify the encoding scheme on a per column basis.
804
+ Can only be used when ``use_dictionary`` is set to False, and
805
+ cannot be used in combination with ``use_byte_stream_split``.
806
+ Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT',
807
+ 'DELTA_BINARY_PACKED', 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'}.
808
+ Certain encodings are only compatible with certain data types.
809
+ Please refer to the encodings section of `Reading and writing Parquet
810
+ files <https://arrow.apache.org/docs/cpp/parquet.html#encodings>`_.
811
+ data_page_version : {"1.0", "2.0"}, default "1.0"
812
+ The serialized Parquet data page format version to write, defaults to
813
+ 1.0. This does not impact the file schema logical types and Arrow to
814
+ Parquet type casting behavior; for that use the "version" option.
815
+ use_compliant_nested_type : bool, default True
816
+ Whether to write compliant Parquet nested type (lists) as defined
817
+ `here <https://github.com/apache/parquet-format/blob/master/
818
+ LogicalTypes.md#nested-types>`_, defaults to ``True``.
819
+ For ``use_compliant_nested_type=True``, this will write into a list
820
+ with 3-level structure where the middle level, named ``list``,
821
+ is a repeated group with a single field named ``element``::
822
+
823
+ <list-repetition> group <name> (LIST) {
824
+ repeated group list {
825
+ <element-repetition> <element-type> element;
826
+ }
827
+ }
828
+
829
+ For ``use_compliant_nested_type=False``, this will also write into a list
830
+ with 3-level structure, where the name of the single field of the middle
831
+ level ``list`` is taken from the element name for nested columns in Arrow,
832
+ which defaults to ``item``::
833
+
834
+ <list-repetition> group <name> (LIST) {
835
+ repeated group list {
836
+ <element-repetition> <element-type> item;
837
+ }
838
+ }
839
+ encryption_properties : FileEncryptionProperties, default None
840
+ File encryption properties for Parquet Modular Encryption.
841
+ If None, no encryption will be done.
842
+ The encryption properties can be created using:
843
+ ``CryptoFactory.file_encryption_properties()``.
844
+ write_batch_size : int, default None
845
+ Number of values to write to a page at a time. If None, use the default of
846
+ 1024. ``write_batch_size`` is complementary to ``data_page_size``. If pages
847
+ are exceeding the ``data_page_size`` due to large column values, lowering
848
+ the batch size can help keep page sizes closer to the intended size.
849
+ dictionary_pagesize_limit : int, default None
850
+ Specify the dictionary page size limit per row group. If None, use the
851
+ default 1MB.
852
+ store_schema : bool, default True
853
+ By default, the Arrow schema is serialized and stored in the Parquet
854
+ file metadata (in the "ARROW:schema" key). When reading the file,
855
+ if this key is available, it will be used to more faithfully recreate
856
+ the original Arrow data. For example, for tz-aware timestamp columns
857
+ it will restore the timezone (Parquet only stores the UTC values without
858
+ timezone), or columns with duration type will be restored from the int64
859
+ Parquet column.
860
+ write_page_index : bool, default False
861
+ Whether to write a page index in general for all columns.
862
+ Writing statistics to the page index disables the old method of writing
863
+ statistics to each data page header. The page index makes statistics-based
864
+ filtering more efficient than the page header, as it gathers all the
865
+ statistics for a Parquet file in a single place, avoiding scattered I/O.
866
+ Note that the page index is not yet used on the read side by PyArrow.
867
+ write_page_checksum : bool, default False
868
+ Whether to write page checksums in general for all columns.
869
+ Page checksums enable detection of data corruption, which might occur during
870
+ transmission or in storage.
871
+ sorting_columns : Sequence of SortingColumn, default None
872
+ Specify the sort order of the data being written. The writer does not sort
873
+ the data nor does it verify that the data is sorted. The sort order is
874
+ written to the row group metadata, which can then be used by readers.
875
+ """
876
+
877
+ _parquet_writer_example_doc = """\
878
+ Generate an example PyArrow Table and RecordBatch:
879
+
880
+ >>> import pyarrow as pa
881
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
882
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
883
+ ... "Brittle stars", "Centipede"]})
884
+ >>> batch = pa.record_batch([[2, 2, 4, 4, 5, 100],
885
+ ... ["Flamingo", "Parrot", "Dog", "Horse",
886
+ ... "Brittle stars", "Centipede"]],
887
+ ... names=['n_legs', 'animal'])
888
+
889
+ create a ParquetWriter object:
890
+
891
+ >>> import pyarrow.parquet as pq
892
+ >>> writer = pq.ParquetWriter('example.parquet', table.schema)
893
+
894
+ and write the Table into the Parquet file:
895
+
896
+ >>> writer.write_table(table)
897
+ >>> writer.close()
898
+
899
+ >>> pq.read_table('example.parquet').to_pandas()
900
+ n_legs animal
901
+ 0 2 Flamingo
902
+ 1 2 Parrot
903
+ 2 4 Dog
904
+ 3 4 Horse
905
+ 4 5 Brittle stars
906
+ 5 100 Centipede
907
+
908
+ create a ParquetWriter object for the RecordBatch:
909
+
910
+ >>> writer2 = pq.ParquetWriter('example2.parquet', batch.schema)
911
+
912
+ and write the RecordBatch into the Parquet file:
913
+
914
+ >>> writer2.write_batch(batch)
915
+ >>> writer2.close()
916
+
917
+ >>> pq.read_table('example2.parquet').to_pandas()
918
+ n_legs animal
919
+ 0 2 Flamingo
920
+ 1 2 Parrot
921
+ 2 4 Dog
922
+ 3 4 Horse
923
+ 4 5 Brittle stars
924
+ 5 100 Centipede
925
+ """
926
+
927
+
928
+ class ParquetWriter:
929
+
930
+ __doc__ = """
931
+ Class for incrementally building a Parquet file for Arrow tables.
932
+
933
+ Parameters
934
+ ----------
935
+ where : path or file-like object
936
+ schema : pyarrow.Schema
937
+ {}
938
+ writer_engine_version : unused
939
+ **options : dict
940
+ If options contains a key `metadata_collector` then the
941
+ corresponding value is assumed to be a list (or any object with
942
+ `.append` method) that will be filled with the file metadata instance
943
+ of the written file.
944
+
945
+ Examples
946
+ --------
947
+ {}
948
+ """.format(_parquet_writer_arg_docs, _parquet_writer_example_doc)
949
+
950
+ def __init__(self, where, schema, filesystem=None,
951
+ flavor=None,
952
+ version='2.6',
953
+ use_dictionary=True,
954
+ compression='snappy',
955
+ write_statistics=True,
956
+ use_deprecated_int96_timestamps=None,
957
+ compression_level=None,
958
+ use_byte_stream_split=False,
959
+ column_encoding=None,
960
+ writer_engine_version=None,
961
+ data_page_version='1.0',
962
+ use_compliant_nested_type=True,
963
+ encryption_properties=None,
964
+ write_batch_size=None,
965
+ dictionary_pagesize_limit=None,
966
+ store_schema=True,
967
+ write_page_index=False,
968
+ write_page_checksum=False,
969
+ sorting_columns=None,
970
+ **options):
971
+ if use_deprecated_int96_timestamps is None:
972
+ # Use int96 timestamps for Spark
973
+ if flavor is not None and 'spark' in flavor:
974
+ use_deprecated_int96_timestamps = True
975
+ else:
976
+ use_deprecated_int96_timestamps = False
977
+
978
+ self.flavor = flavor
979
+ if flavor is not None:
980
+ schema, self.schema_changed = _sanitize_schema(schema, flavor)
981
+ else:
982
+ self.schema_changed = False
983
+
984
+ self.schema = schema
985
+ self.where = where
986
+
987
+ # If we open a file using a filesystem, store file handle so we can be
988
+ # sure to close it when `self.close` is called.
989
+ self.file_handle = None
990
+
991
+ filesystem, path = _resolve_filesystem_and_path(where, filesystem)
992
+ if filesystem is not None:
993
+ # ARROW-10480: do not auto-detect compression. While
994
+ # a filename like foo.parquet.gz is nonconforming, it
995
+ # shouldn't implicitly apply compression.
996
+ sink = self.file_handle = filesystem.open_output_stream(
997
+ path, compression=None)
998
+ else:
999
+ sink = where
1000
+ self._metadata_collector = options.pop('metadata_collector', None)
1001
+ engine_version = 'V2'
1002
+ self.writer = _parquet.ParquetWriter(
1003
+ sink, schema,
1004
+ version=version,
1005
+ compression=compression,
1006
+ use_dictionary=use_dictionary,
1007
+ write_statistics=write_statistics,
1008
+ use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
1009
+ compression_level=compression_level,
1010
+ use_byte_stream_split=use_byte_stream_split,
1011
+ column_encoding=column_encoding,
1012
+ writer_engine_version=engine_version,
1013
+ data_page_version=data_page_version,
1014
+ use_compliant_nested_type=use_compliant_nested_type,
1015
+ encryption_properties=encryption_properties,
1016
+ write_batch_size=write_batch_size,
1017
+ dictionary_pagesize_limit=dictionary_pagesize_limit,
1018
+ store_schema=store_schema,
1019
+ write_page_index=write_page_index,
1020
+ write_page_checksum=write_page_checksum,
1021
+ sorting_columns=sorting_columns,
1022
+ **options)
1023
+ self.is_open = True
1024
+
1025
+ def __del__(self):
1026
+ if getattr(self, 'is_open', False):
1027
+ self.close()
1028
+
1029
+ def __enter__(self):
1030
+ return self
1031
+
1032
+ def __exit__(self, *args, **kwargs):
1033
+ self.close()
1034
+ # return false since we want to propagate exceptions
1035
+ return False
1036
+
1037
+ def write(self, table_or_batch, row_group_size=None):
1038
+ """
1039
+ Write RecordBatch or Table to the Parquet file.
1040
+
1041
+ Parameters
1042
+ ----------
1043
+ table_or_batch : {RecordBatch, Table}
1044
+ row_group_size : int, default None
1045
+ Maximum number of rows in each written row group. If None,
1046
+ the row group size will be the minimum of the input
1047
+ table or batch length and 1024 * 1024.
1048
+ """
1049
+ if isinstance(table_or_batch, pa.RecordBatch):
1050
+ self.write_batch(table_or_batch, row_group_size)
1051
+ elif isinstance(table_or_batch, pa.Table):
1052
+ self.write_table(table_or_batch, row_group_size)
1053
+ else:
1054
+ raise TypeError(type(table_or_batch))
1055
+
1056
+ def write_batch(self, batch, row_group_size=None):
1057
+ """
1058
+ Write RecordBatch to the Parquet file.
1059
+
1060
+ Parameters
1061
+ ----------
1062
+ batch : RecordBatch
1063
+ row_group_size : int, default None
1064
+ Maximum number of rows in written row group. If None, the
1065
+ row group size will be the minimum of the RecordBatch
1066
+ size and 1024 * 1024. If set larger than 64Mi then 64Mi
1067
+ will be used instead.
1068
+ """
1069
+ table = pa.Table.from_batches([batch], batch.schema)
1070
+ self.write_table(table, row_group_size)
1071
+
1072
+ def write_table(self, table, row_group_size=None):
1073
+ """
1074
+ Write Table to the Parquet file.
1075
+
1076
+ Parameters
1077
+ ----------
1078
+ table : Table
1079
+ row_group_size : int, default None
1080
+ Maximum number of rows in each written row group. If None,
1081
+ the row group size will be the minimum of the Table size
1082
+ and 1024 * 1024. If set larger than 64Mi then 64Mi will
1083
+ be used instead.
1084
+
1085
+ """
1086
+ if self.schema_changed:
1087
+ table = _sanitize_table(table, self.schema, self.flavor)
1088
+ assert self.is_open
1089
+
1090
+ if not table.schema.equals(self.schema, check_metadata=False):
1091
+ msg = ('Table schema does not match schema used to create file: '
1092
+ '\ntable:\n{!s} vs. \nfile:\n{!s}'
1093
+ .format(table.schema, self.schema))
1094
+ raise ValueError(msg)
1095
+
1096
+ self.writer.write_table(table, row_group_size=row_group_size)
1097
+
1098
+ def close(self):
1099
+ """
1100
+ Close the connection to the Parquet file.
1101
+ """
1102
+ if self.is_open:
1103
+ self.writer.close()
1104
+ self.is_open = False
1105
+ if self._metadata_collector is not None:
1106
+ self._metadata_collector.append(self.writer.metadata)
1107
+ if self.file_handle is not None:
1108
+ self.file_handle.close()
1109
+
1110
+
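A hedged sketch (not part of the file; the file name is illustrative) of the metadata_collector option handled in ParquetWriter.close() above: once the writer is closed, the collector list receives the FileMetaData of the written file.

    import pyarrow as pa
    import pyarrow.parquet as pq

    table = pa.table({'n_legs': [2, 4]})
    collected = []
    with pq.ParquetWriter('example_md.parquet', table.schema,
                          metadata_collector=collected) as writer:
        writer.write_table(table)
    print(collected[0].num_rows)   # 2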
1111
+ def _get_pandas_index_columns(keyvalues):
1112
+ return (json.loads(keyvalues[b'pandas'].decode('utf8'))
1113
+ ['index_columns'])
1114
+
1115
+
1116
+ EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
1117
+
1118
+
1119
+ _read_docstring_common = """\
1120
+ read_dictionary : list, default None
1121
+ List of names or column paths (for nested types) to read directly
1122
+ as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
1123
+ a flat column as dictionary-encoded pass the column name. For
1124
+ nested types, you must pass the full column "path", which could be
1125
+ something like level1.level2.list.item. Refer to the Parquet
1126
+ file's schema to obtain the paths.
1127
+ memory_map : bool, default False
1128
+ If the source is a file path, use a memory map to read file, which can
1129
+ improve performance in some environments.
1130
+ buffer_size : int, default 0
1131
+ If positive, perform read buffering when deserializing individual
1132
+ column chunks. Otherwise IO calls are unbuffered.
1133
+ partitioning : pyarrow.dataset.Partitioning or str or list of str, \
1134
+ default "hive"
1135
+ The partitioning scheme for a partitioned dataset. The default of "hive"
1136
+ assumes directory names with key=value pairs like "/year=2009/month=11".
1137
+ In addition, a scheme like "/2009/11" is also supported, in which case
1138
+ you need to specify the field names or a full schema. See the
1139
+ ``pyarrow.dataset.partitioning()`` function for more details."""
1140
+
1141
+
1142
+ _parquet_dataset_example = """\
1143
+ Generate an example PyArrow Table and write it to a partitioned dataset:
1144
+
1145
+ >>> import pyarrow as pa
1146
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
1147
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
1148
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1149
+ ... "Brittle stars", "Centipede"]})
1150
+ >>> import pyarrow.parquet as pq
1151
+ >>> pq.write_to_dataset(table, root_path='dataset_v2',
1152
+ ... partition_cols=['year'])
1153
+
1154
+ create a ParquetDataset object from the dataset source:
1155
+
1156
+ >>> dataset = pq.ParquetDataset('dataset_v2/')
1157
+
1158
+ and read the data:
1159
+
1160
+ >>> dataset.read().to_pandas()
1161
+ n_legs animal year
1162
+ 0 5 Brittle stars 2019
1163
+ 1 2 Flamingo 2020
1164
+ 2 4 Dog 2021
1165
+ 3 100 Centipede 2021
1166
+ 4 2 Parrot 2022
1167
+ 5 4 Horse 2022
1168
+
1169
+ create a ParquetDataset object with filter:
1170
+
1171
+ >>> dataset = pq.ParquetDataset('dataset_v2/',
1172
+ ... filters=[('n_legs','=',4)])
1173
+ >>> dataset.read().to_pandas()
1174
+ n_legs animal year
1175
+ 0 4 Dog 2021
1176
+ 1 4 Horse 2022
1177
+ """
1178
+
1179
+
1180
+ class ParquetDataset:
1181
+ __doc__ = """
1182
+ Encapsulates details of reading a complete Parquet dataset possibly
1183
+ consisting of multiple files and partitions in subdirectories.
1184
+
1185
+ Parameters
1186
+ ----------
1187
+ path_or_paths : str or List[str]
1188
+ A directory name, single file name, or list of file names.
1189
+ filesystem : FileSystem, default None
1190
+ If nothing passed, will be inferred based on path.
1191
+ The path will first be looked up in the local on-disk filesystem; otherwise
1192
+ it will be parsed as a URI to determine the filesystem.
1193
+ schema : pyarrow.parquet.Schema
1194
+ Optionally provide the Schema for the Dataset, in which case it will
1195
+ not be inferred from the source.
1196
+ filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
1197
+ Rows which do not match the filter predicate will be removed from scanned
1198
+ data. Partition keys embedded in a nested directory structure will be
1199
+ exploited to avoid loading files at all if they contain no matching rows.
1200
+ Within-file level filtering and different partitioning schemes are supported.
1201
+
1202
+ {1}
1203
+ {0}
1204
+ ignore_prefixes : list, optional
1205
+ Files matching any of these prefixes will be ignored by the
1206
+ discovery process.
1207
+ This is matched to the basename of a path.
1208
+ By default this is ['.', '_'].
1209
+ Note that discovery happens only if a directory is passed as source.
1210
+ pre_buffer : bool, default True
1211
+ Coalesce and issue file reads in parallel to improve performance on
1212
+ high-latency filesystems (e.g. S3, GCS). If True, Arrow will use a
1213
+ background I/O thread pool. If using a filesystem layer that itself
1214
+ performs readahead (e.g. fsspec's S3FS), disable readahead for best
1215
+ results. Set to False if you want to prioritize minimal memory usage
1216
+ over maximum speed.
1217
+ coerce_int96_timestamp_unit : str, default None
1218
+ Cast timestamps that are stored in INT96 format to a particular resolution
1219
+ (e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96
1220
+ timestamps will be inferred as timestamps in nanoseconds.
1221
+ decryption_properties : FileDecryptionProperties or None
1222
+ File-level decryption properties.
1223
+ The decryption properties can be created using
1224
+ ``CryptoFactory.file_decryption_properties()``.
1225
+ thrift_string_size_limit : int, default None
1226
+ If not None, override the maximum total string size allocated
1227
+ when decoding Thrift structures. The default limit should be
1228
+ sufficient for most Parquet files.
1229
+ thrift_container_size_limit : int, default None
1230
+ If not None, override the maximum total size of containers allocated
1231
+ when decoding Thrift structures. The default limit should be
1232
+ sufficient for most Parquet files.
1233
+ page_checksum_verification : bool, default False
1234
+ If True, verify the page checksum for each page read from the file.
1235
+ use_legacy_dataset : bool, optional
1236
+ Deprecated and has no effect from PyArrow version 15.0.0.
1237
+
1238
+ Examples
1239
+ --------
1240
+ {2}
1241
+ """.format(_read_docstring_common, _DNF_filter_doc, _parquet_dataset_example)
1242
+
1243
+ def __init__(self, path_or_paths, filesystem=None, schema=None, *, filters=None,
1244
+ read_dictionary=None, memory_map=False, buffer_size=None,
1245
+ partitioning="hive", ignore_prefixes=None, pre_buffer=True,
1246
+ coerce_int96_timestamp_unit=None,
1247
+ decryption_properties=None, thrift_string_size_limit=None,
1248
+ thrift_container_size_limit=None,
1249
+ page_checksum_verification=False,
1250
+ use_legacy_dataset=None):
1251
+
1252
+ if use_legacy_dataset is not None:
1253
+ warnings.warn(
1254
+ "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
1255
+ "and will be removed in a future version.",
1256
+ FutureWarning, stacklevel=2)
1257
+
1258
+ import pyarrow.dataset as ds
1259
+
1260
+ # map format arguments
1261
+ read_options = {
1262
+ "pre_buffer": pre_buffer,
1263
+ "coerce_int96_timestamp_unit": coerce_int96_timestamp_unit,
1264
+ "thrift_string_size_limit": thrift_string_size_limit,
1265
+ "thrift_container_size_limit": thrift_container_size_limit,
1266
+ "page_checksum_verification": page_checksum_verification,
1267
+ }
1268
+ if buffer_size:
1269
+ read_options.update(use_buffered_stream=True,
1270
+ buffer_size=buffer_size)
1271
+ if read_dictionary is not None:
1272
+ read_options.update(dictionary_columns=read_dictionary)
1273
+
1274
+ if decryption_properties is not None:
1275
+ read_options.update(decryption_properties=decryption_properties)
1276
+
1277
+ self._filter_expression = None
1278
+ if filters is not None:
1279
+ self._filter_expression = filters_to_expression(filters)
1280
+
1281
+ # map old filesystems to new one
1282
+ if filesystem is not None:
1283
+ filesystem = _ensure_filesystem(
1284
+ filesystem, use_mmap=memory_map)
1285
+ elif filesystem is None and memory_map:
1286
+ # if memory_map is specified, assume local file system (string
1287
+ # path can in principle be URI for any filesystem)
1288
+ filesystem = LocalFileSystem(use_mmap=memory_map)
1289
+
1290
+ # This needs to be checked after _ensure_filesystem, because that
1291
+ # handles the case of an fsspec LocalFileSystem
1292
+ if (
1293
+ hasattr(path_or_paths, "__fspath__") and
1294
+ filesystem is not None and
1295
+ not isinstance(filesystem, LocalFileSystem)
1296
+ ):
1297
+ raise TypeError(
1298
+ "Path-like objects with __fspath__ must only be used with "
1299
+ f"local file systems, not {type(filesystem)}"
1300
+ )
1301
+
1302
+ # check for single fragment dataset
1303
+ single_file = None
1304
+ self._base_dir = None
1305
+ if not isinstance(path_or_paths, list):
1306
+ if _is_path_like(path_or_paths):
1307
+ path_or_paths = _stringify_path(path_or_paths)
1308
+ if filesystem is None:
1309
+ # path might be a URI describing the FileSystem as well
1310
+ try:
1311
+ filesystem, path_or_paths = FileSystem.from_uri(
1312
+ path_or_paths)
1313
+ except ValueError:
1314
+ filesystem = LocalFileSystem(use_mmap=memory_map)
1315
+ finfo = filesystem.get_file_info(path_or_paths)
1316
+ if finfo.is_file:
1317
+ single_file = path_or_paths
1318
+ if finfo.type == FileType.Directory:
1319
+ self._base_dir = path_or_paths
1320
+ else:
1321
+ single_file = path_or_paths
1322
+
1323
+ parquet_format = ds.ParquetFileFormat(**read_options)
1324
+
1325
+ if single_file is not None:
1326
+ fragment = parquet_format.make_fragment(single_file, filesystem)
1327
+
1328
+ self._dataset = ds.FileSystemDataset(
1329
+ [fragment], schema=schema or fragment.physical_schema,
1330
+ format=parquet_format,
1331
+ filesystem=fragment.filesystem
1332
+ )
1333
+ return
1334
+
1335
+ # check partitioning to enable dictionary encoding
1336
+ if partitioning == "hive":
1337
+ partitioning = ds.HivePartitioning.discover(
1338
+ infer_dictionary=True)
1339
+
1340
+ self._dataset = ds.dataset(path_or_paths, filesystem=filesystem,
1341
+ schema=schema, format=parquet_format,
1342
+ partitioning=partitioning,
1343
+ ignore_prefixes=ignore_prefixes)
1344
+
1345
+ def equals(self, other):
1346
+ if not isinstance(other, ParquetDataset):
1347
+ raise TypeError('`other` must be an instance of ParquetDataset')
1348
+
1349
+ return (self.schema == other.schema and
1350
+ self._dataset.format == other._dataset.format and
1351
+ self.filesystem == other.filesystem and
1352
+ # self.fragments == other.fragments and
1353
+ self.files == other.files)
1354
+
1355
+ def __eq__(self, other):
1356
+ try:
1357
+ return self.equals(other)
1358
+ except TypeError:
1359
+ return NotImplemented
1360
+
1361
+ @property
1362
+ def schema(self):
1363
+ """
1364
+ Schema of the Dataset.
1365
+
1366
+ Examples
1367
+ --------
1368
+ Generate an example dataset:
1369
+
1370
+ >>> import pyarrow as pa
1371
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
1372
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
1373
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1374
+ ... "Brittle stars", "Centipede"]})
1375
+ >>> import pyarrow.parquet as pq
1376
+ >>> pq.write_to_dataset(table, root_path='dataset_v2_schema',
1377
+ ... partition_cols=['year'])
1378
+ >>> dataset = pq.ParquetDataset('dataset_v2_schema/')
1379
+
1380
+ Read the schema:
1381
+
1382
+ >>> dataset.schema
1383
+ n_legs: int64
1384
+ animal: string
1385
+ year: dictionary<values=int32, indices=int32, ordered=0>
1386
+ """
1387
+ return self._dataset.schema
1388
+
1389
+ def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
1390
+ """
1391
+ Read (multiple) Parquet files as a single pyarrow.Table.
1392
+
1393
+ Parameters
1394
+ ----------
1395
+ columns : List[str]
1396
+ Names of columns to read from the dataset. The partition fields
1397
+ are not automatically included.
1398
+ use_threads : bool, default True
1399
+ Perform multi-threaded column reads.
1400
+ use_pandas_metadata : bool, default False
1401
+ If True and file has custom pandas schema metadata, ensure that
1402
+ index columns are also loaded.
1403
+
1404
+ Returns
1405
+ -------
1406
+ pyarrow.Table
1407
+ Content of the file as a table (of columns).
1408
+
1409
+ Examples
1410
+ --------
1411
+ Generate an example dataset:
1412
+
1413
+ >>> import pyarrow as pa
1414
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
1415
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
1416
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1417
+ ... "Brittle stars", "Centipede"]})
1418
+ >>> import pyarrow.parquet as pq
1419
+ >>> pq.write_to_dataset(table, root_path='dataset_v2_read',
1420
+ ... partition_cols=['year'])
1421
+ >>> dataset = pq.ParquetDataset('dataset_v2_read/')
1422
+
1423
+ Read the dataset:
1424
+
1425
+ >>> dataset.read(columns=["n_legs"])
1426
+ pyarrow.Table
1427
+ n_legs: int64
1428
+ ----
1429
+ n_legs: [[5],[2],[4,100],[2,4]]
1430
+ """
1431
+ # if use_pandas_metadata, we need to include index columns in the
1432
+ # column selection, to be able to restore those in the pandas DataFrame
1433
+ metadata = self.schema.metadata or {}
1434
+
1435
+ if use_pandas_metadata:
1436
+ # if the dataset schema metadata itself doesn't have pandas
1437
+ # then try to get this from common file (for backwards compat)
1438
+ if b"pandas" not in metadata:
1439
+ common_metadata = self._get_common_pandas_metadata()
1440
+ if common_metadata:
1441
+ metadata = common_metadata
1442
+
1443
+ if columns is not None and use_pandas_metadata:
1444
+ if metadata and b'pandas' in metadata:
1445
+ # RangeIndex can be represented as dict instead of column name
1446
+ index_columns = [
1447
+ col for col in _get_pandas_index_columns(metadata)
1448
+ if not isinstance(col, dict)
1449
+ ]
1450
+ columns = (
1451
+ list(columns) + list(set(index_columns) - set(columns))
1452
+ )
1453
+
1454
+ table = self._dataset.to_table(
1455
+ columns=columns, filter=self._filter_expression,
1456
+ use_threads=use_threads
1457
+ )
1458
+
1459
+ # if use_pandas_metadata, restore the pandas metadata (which gets
1460
+ # lost if doing a specific `columns` selection in to_table)
1461
+ if use_pandas_metadata:
1462
+ if metadata and b"pandas" in metadata:
1463
+ new_metadata = table.schema.metadata or {}
1464
+ new_metadata.update({b"pandas": metadata[b"pandas"]})
1465
+ table = table.replace_schema_metadata(new_metadata)
1466
+
1467
+ return table
1468
+
1469
+ def _get_common_pandas_metadata(self):
1470
+
1471
+ if not self._base_dir:
1472
+ return None
1473
+
1474
+ metadata = None
1475
+ for name in ["_common_metadata", "_metadata"]:
1476
+ metadata_path = os.path.join(str(self._base_dir), name)
1477
+ finfo = self.filesystem.get_file_info(metadata_path)
1478
+ if finfo.is_file:
1479
+ pq_meta = read_metadata(
1480
+ metadata_path, filesystem=self.filesystem)
1481
+ metadata = pq_meta.metadata
1482
+ if metadata and b'pandas' in metadata:
1483
+ break
1484
+
1485
+ return metadata
1486
+
1487
+ def read_pandas(self, **kwargs):
1488
+ """
1489
+ Read dataset including pandas metadata, if any. Other arguments passed
1490
+ through to :func:`read`, see docstring for further details.
1491
+
1492
+ Parameters
1493
+ ----------
1494
+ **kwargs : optional
1495
+ Additional options for :func:`read`
1496
+
1497
+ Examples
1498
+ --------
1499
+ Generate an example parquet file:
1500
+
1501
+ >>> import pyarrow as pa
1502
+ >>> import pandas as pd
1503
+ >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021],
1504
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
1505
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1506
+ ... "Brittle stars", "Centipede"]})
1507
+ >>> table = pa.Table.from_pandas(df)
1508
+ >>> import pyarrow.parquet as pq
1509
+ >>> pq.write_table(table, 'table_V2.parquet')
1510
+ >>> dataset = pq.ParquetDataset('table_V2.parquet')
1511
+
1512
+ Read the dataset with pandas metadata:
1513
+
1514
+ >>> dataset.read_pandas(columns=["n_legs"])
1515
+ pyarrow.Table
1516
+ n_legs: int64
1517
+ ----
1518
+ n_legs: [[2,2,4,4,5,100]]
1519
+
1520
+ >>> dataset.read_pandas(columns=["n_legs"]).schema.pandas_metadata
1521
+ {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, ...}
1522
+ """
1523
+ return self.read(use_pandas_metadata=True, **kwargs)
1524
+
1525
+ @property
1526
+ def fragments(self):
1527
+ """
1528
+ A list of the Dataset source fragments or pieces with absolute
1529
+ file paths.
1530
+
1531
+ Examples
1532
+ --------
1533
+ Generate an example dataset:
1534
+
1535
+ >>> import pyarrow as pa
1536
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
1537
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
1538
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1539
+ ... "Brittle stars", "Centipede"]})
1540
+ >>> import pyarrow.parquet as pq
1541
+ >>> pq.write_to_dataset(table, root_path='dataset_v2_fragments',
1542
+ ... partition_cols=['year'])
1543
+ >>> dataset = pq.ParquetDataset('dataset_v2_fragments/')
1544
+
1545
+ List the fragments:
1546
+
1547
+ >>> dataset.fragments
1548
+ [<pyarrow.dataset.ParquetFileFragment path=dataset_v2_fragments/...
1549
+ """
1550
+ return list(self._dataset.get_fragments())
1551
+
1552
+ @property
1553
+ def files(self):
1554
+ """
1555
+ A list of absolute Parquet file paths in the Dataset source.
1556
+
1557
+ Examples
1558
+ --------
1559
+ Generate an example dataset:
1560
+
1561
+ >>> import pyarrow as pa
1562
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
1563
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
1564
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1565
+ ... "Brittle stars", "Centipede"]})
1566
+ >>> import pyarrow.parquet as pq
1567
+ >>> pq.write_to_dataset(table, root_path='dataset_v2_files',
1568
+ ... partition_cols=['year'])
1569
+ >>> dataset = pq.ParquetDataset('dataset_v2_files/')
1570
+
1571
+ List the files:
1572
+
1573
+ >>> dataset.files
1574
+ ['dataset_v2_files/year=2019/...-0.parquet', ...
1575
+ """
1576
+ return self._dataset.files
1577
+
1578
+ @property
1579
+ def filesystem(self):
1580
+ """
1581
+ The filesystem type of the Dataset source.
1582
+ """
1583
+ return self._dataset.filesystem
1584
+
1585
+ @property
1586
+ def partitioning(self):
1587
+ """
1588
+ The partitioning of the Dataset source, if discovered.
1589
+ """
1590
+ return self._dataset.partitioning
1591
+
1592
+
1593
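A minimal usage sketch for the ParquetDataset class defined above (illustration only, not part of core.py); it exercises just the constructor, schema, files and read members documented in the class. The 'example_dataset' directory and the example table are assumptions made for the demo.

import pyarrow as pa
import pyarrow.parquet as pq

# Hypothetical example data and directory name, for illustration only.
table = pa.table({'year': [2020, 2021, 2021], 'n_legs': [2, 4, 100]})
pq.write_to_dataset(table, root_path='example_dataset', partition_cols=['year'])

# Filters are pushed into the scan, so non-matching fragments are skipped.
dataset = pq.ParquetDataset('example_dataset/', filters=[('n_legs', '>', 2)])
print(dataset.schema)              # partition column discovered as dictionary type
print(dataset.files)               # paths of the discovered data files
print(dataset.read(columns=['n_legs']).num_rows)   # 2 rows survive the filter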
+ _read_table_docstring = """
1594
+ {0}
1595
+
1596
+ Parameters
1597
+ ----------
1598
+ source : str, pyarrow.NativeFile, or file-like object
1599
+ If a string is passed, it can be a single file name or directory name. For
1601
+ file-like objects, only a single file will be read. Use pyarrow.BufferReader to
1601
+ read a file contained in a bytes or buffer-like object.
1602
+ columns : list
1603
+ If not None, only these columns will be read from the file. A column
1604
+ name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
1605
+ 'a.c', and 'a.d.e'. If empty, no columns will be read. Note
1606
+ that the table will still have the correct num_rows set despite having
1607
+ no columns.
1608
+ use_threads : bool, default True
1609
+ Perform multi-threaded column reads.
1610
+ schema : Schema, optional
1611
+ Optionally provide the Schema for the parquet dataset, in which case it
1612
+ will not be inferred from the source.
1613
+ {1}
1614
+ filesystem : FileSystem, default None
1615
+ If nothing passed, will be inferred based on path.
1616
+ The path will first be looked up in the local on-disk filesystem; otherwise
1617
+ it will be parsed as a URI to determine the filesystem.
1618
+ filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
1619
+ Rows which do not match the filter predicate will be removed from scanned
1620
+ data. Partition keys embedded in a nested directory structure will be
1621
+ exploited to avoid loading files at all if they contain no matching rows.
1622
+ Within-file level filtering and different partitioning schemes are supported.
1623
+
1624
+ {3}
1625
+ use_legacy_dataset : bool, optional
1626
+ Deprecated and has no effect from PyArrow version 15.0.0.
1627
+ ignore_prefixes : list, optional
1628
+ Files matching any of these prefixes will be ignored by the
1629
+ discovery process.
1630
+ This is matched to the basename of a path.
1631
+ By default this is ['.', '_'].
1632
+ Note that discovery happens only if a directory is passed as source.
1633
+ pre_buffer : bool, default True
1634
+ Coalesce and issue file reads in parallel to improve performance on
1635
+ high-latency filesystems (e.g. S3). If True, Arrow will use a
1636
+ background I/O thread pool. If using a filesystem layer that itself
1637
+ performs readahead (e.g. fsspec's S3FS), disable readahead for best
1638
+ results.
1639
+ coerce_int96_timestamp_unit : str, default None
1640
+ Cast timestamps that are stored in INT96 format to a particular
1641
+ resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
1642
+ and therefore INT96 timestamps will be inferred as timestamps
1643
+ in nanoseconds.
1644
+ decryption_properties : FileDecryptionProperties or None
1645
+ File-level decryption properties.
1646
+ The decryption properties can be created using
1647
+ ``CryptoFactory.file_decryption_properties()``.
1648
+ thrift_string_size_limit : int, default None
1649
+ If not None, override the maximum total string size allocated
1650
+ when decoding Thrift structures. The default limit should be
1651
+ sufficient for most Parquet files.
1652
+ thrift_container_size_limit : int, default None
1653
+ If not None, override the maximum total size of containers allocated
1654
+ when decoding Thrift structures. The default limit should be
1655
+ sufficient for most Parquet files.
1656
+ page_checksum_verification : bool, default False
1657
+ If True, verify the checksum for each page read from the file.
1658
+
1659
+ Returns
1660
+ -------
1661
+ {2}
1662
+
1663
+ {4}
1664
+ """
1665
+
1666
+ _read_table_example = """\
1667
+
1668
+ Examples
1669
+ --------
1670
+
1671
+ Generate an example PyArrow Table and write it to a partitioned dataset:
1672
+
1673
+ >>> import pyarrow as pa
1674
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
1675
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
1676
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1677
+ ... "Brittle stars", "Centipede"]})
1678
+ >>> import pyarrow.parquet as pq
1679
+ >>> pq.write_to_dataset(table, root_path='dataset_name_2',
1680
+ ... partition_cols=['year'])
1681
+
1682
+ Read the data:
1683
+
1684
+ >>> pq.read_table('dataset_name_2').to_pandas()
1685
+ n_legs animal year
1686
+ 0 5 Brittle stars 2019
1687
+ 1 2 Flamingo 2020
1688
+ 2 4 Dog 2021
1689
+ 3 100 Centipede 2021
1690
+ 4 2 Parrot 2022
1691
+ 5 4 Horse 2022
1692
+
1693
+
1694
+ Read only a subset of columns:
1695
+
1696
+ >>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"])
1697
+ pyarrow.Table
1698
+ n_legs: int64
1699
+ animal: string
1700
+ ----
1701
+ n_legs: [[5],[2],[4,100],[2,4]]
1702
+ animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"],["Parrot","Horse"]]
1703
+
1704
+ Read a subset of columns and read one column as DictionaryArray:
1705
+
1706
+ >>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
1707
+ ... read_dictionary=["animal"])
1708
+ pyarrow.Table
1709
+ n_legs: int64
1710
+ animal: dictionary<values=string, indices=int32, ordered=0>
1711
+ ----
1712
+ n_legs: [[5],[2],[4,100],[2,4]]
1713
+ animal: [ -- dictionary:
1714
+ ["Brittle stars"] -- indices:
1715
+ [0], -- dictionary:
1716
+ ["Flamingo"] -- indices:
1717
+ [0], -- dictionary:
1718
+ ["Dog","Centipede"] -- indices:
1719
+ [0,1], -- dictionary:
1720
+ ["Parrot","Horse"] -- indices:
1721
+ [0,1]]
1722
+
1723
+ Read the table with filter:
1724
+
1725
+ >>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
1726
+ ... filters=[('n_legs','<',4)]).to_pandas()
1727
+ n_legs animal
1728
+ 0 2 Flamingo
1729
+ 1 2 Parrot
1730
+
1731
+ Read data from a single Parquet file:
1732
+
1733
+ >>> pq.write_table(table, 'example.parquet')
1734
+ >>> pq.read_table('example.parquet').to_pandas()
1735
+ year n_legs animal
1736
+ 0 2020 2 Flamingo
1737
+ 1 2022 2 Parrot
1738
+ 2 2021 4 Dog
1739
+ 3 2022 4 Horse
1740
+ 4 2019 5 Brittle stars
1741
+ 5 2021 100 Centipede
1742
+ """
1743
+
1744
+
1745
+ def read_table(source, *, columns=None, use_threads=True,
1746
+ schema=None, use_pandas_metadata=False, read_dictionary=None,
1747
+ memory_map=False, buffer_size=0, partitioning="hive",
1748
+ filesystem=None, filters=None, use_legacy_dataset=None,
1749
+ ignore_prefixes=None, pre_buffer=True,
1750
+ coerce_int96_timestamp_unit=None,
1751
+ decryption_properties=None, thrift_string_size_limit=None,
1752
+ thrift_container_size_limit=None,
1753
+ page_checksum_verification=False):
1754
+
1755
+ if use_legacy_dataset is not None:
1756
+ warnings.warn(
1757
+ "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
1758
+ "and will be removed in a future version.",
1759
+ FutureWarning, stacklevel=2)
1760
+
1761
+ try:
1762
+ dataset = ParquetDataset(
1763
+ source,
1764
+ schema=schema,
1765
+ filesystem=filesystem,
1766
+ partitioning=partitioning,
1767
+ memory_map=memory_map,
1768
+ read_dictionary=read_dictionary,
1769
+ buffer_size=buffer_size,
1770
+ filters=filters,
1771
+ ignore_prefixes=ignore_prefixes,
1772
+ pre_buffer=pre_buffer,
1773
+ coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
1774
+ thrift_string_size_limit=thrift_string_size_limit,
1775
+ thrift_container_size_limit=thrift_container_size_limit,
1776
+ page_checksum_verification=page_checksum_verification,
1777
+ )
1778
+ except ImportError:
1779
+ # fall back on ParquetFile for simple cases when pyarrow.dataset
1780
+ # module is not available
1781
+ if filters is not None:
1782
+ raise ValueError(
1783
+ "the 'filters' keyword is not supported when the "
1784
+ "pyarrow.dataset module is not available"
1785
+ )
1786
+ if partitioning != "hive":
1787
+ raise ValueError(
1788
+ "the 'partitioning' keyword is not supported when the "
1789
+ "pyarrow.dataset module is not available"
1790
+ )
1791
+ if schema is not None:
1792
+ raise ValueError(
1793
+ "the 'schema' argument is not supported when the "
1794
+ "pyarrow.dataset module is not available"
1795
+ )
1796
+ filesystem, path = _resolve_filesystem_and_path(source, filesystem)
1797
+ if filesystem is not None:
1798
+ source = filesystem.open_input_file(path)
1799
+ # TODO test that source is not a directory or a list
1800
+ dataset = ParquetFile(
1801
+ source, read_dictionary=read_dictionary,
1802
+ memory_map=memory_map, buffer_size=buffer_size,
1803
+ pre_buffer=pre_buffer,
1804
+ coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
1805
+ decryption_properties=decryption_properties,
1806
+ thrift_string_size_limit=thrift_string_size_limit,
1807
+ thrift_container_size_limit=thrift_container_size_limit,
1808
+ page_checksum_verification=page_checksum_verification,
1809
+ )
1810
+
1811
+ return dataset.read(columns=columns, use_threads=use_threads,
1812
+ use_pandas_metadata=use_pandas_metadata)
1813
+
1814
+
1815
+ read_table.__doc__ = _read_table_docstring.format(
1816
+ """Read a Table from Parquet format""",
1817
+ "\n".join(("""use_pandas_metadata : bool, default False
1818
+ If True and file has custom pandas schema metadata, ensure that
1819
+ index columns are also loaded.""", _read_docstring_common)),
1820
+ """pyarrow.Table
1821
+ Content of the file as a table (of columns)""",
1822
+ _DNF_filter_doc, _read_table_example)
1823
+
1824
+
1825
+ def read_pandas(source, columns=None, **kwargs):
1826
+ return read_table(
1827
+ source, columns=columns, use_pandas_metadata=True, **kwargs
1828
+ )
1829
+
1830
+
1831
+ read_pandas.__doc__ = _read_table_docstring.format(
1832
+ 'Read a Table from Parquet format, also reading DataFrame\n'
1833
+ 'index values if known in the file metadata',
1834
+ "\n".join((_read_docstring_common,
1835
+ """**kwargs
1836
+ additional options for :func:`read_table`""")),
1837
+ """pyarrow.Table
1838
+ Content of the file as a Table of Columns, including DataFrame
1839
+ indexes as columns""",
1840
+ _DNF_filter_doc, "")
1841
+
1842
+
1843
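A small sketch (illustration only, not part of core.py) of the two equivalent spellings of the `filters` argument accepted by `read_table` above; the file name is hypothetical.

import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.parquet as pq

pq.write_table(pa.table({'n_legs': [2, 4, 5, 100]}), 'animals.parquet')

# DNF-style list of tuples ...
t1 = pq.read_table('animals.parquet', filters=[('n_legs', '<', 5)])
# ... or an explicit compute Expression; both prune rows while scanning.
t2 = pq.read_table('animals.parquet', filters=pc.field('n_legs') < 5)
assert t1.equals(t2) and t1.num_rows == 2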
+ def write_table(table, where, row_group_size=None, version='2.6',
1844
+ use_dictionary=True, compression='snappy',
1845
+ write_statistics=True,
1846
+ use_deprecated_int96_timestamps=None,
1847
+ coerce_timestamps=None,
1848
+ allow_truncated_timestamps=False,
1849
+ data_page_size=None, flavor=None,
1850
+ filesystem=None,
1851
+ compression_level=None,
1852
+ use_byte_stream_split=False,
1853
+ column_encoding=None,
1854
+ data_page_version='1.0',
1855
+ use_compliant_nested_type=True,
1856
+ encryption_properties=None,
1857
+ write_batch_size=None,
1858
+ dictionary_pagesize_limit=None,
1859
+ store_schema=True,
1860
+ write_page_index=False,
1861
+ write_page_checksum=False,
1862
+ sorting_columns=None,
1863
+ **kwargs):
1864
+ # Implementor's note: when adding keywords here / updating defaults, also
1865
+ # update it in write_to_dataset and _dataset_parquet.pyx ParquetFileWriteOptions
1866
+ row_group_size = kwargs.pop('chunk_size', row_group_size)
1867
+ use_int96 = use_deprecated_int96_timestamps
1868
+ try:
1869
+ with ParquetWriter(
1870
+ where, table.schema,
1871
+ filesystem=filesystem,
1872
+ version=version,
1873
+ flavor=flavor,
1874
+ use_dictionary=use_dictionary,
1875
+ write_statistics=write_statistics,
1876
+ coerce_timestamps=coerce_timestamps,
1877
+ data_page_size=data_page_size,
1878
+ allow_truncated_timestamps=allow_truncated_timestamps,
1879
+ compression=compression,
1880
+ use_deprecated_int96_timestamps=use_int96,
1881
+ compression_level=compression_level,
1882
+ use_byte_stream_split=use_byte_stream_split,
1883
+ column_encoding=column_encoding,
1884
+ data_page_version=data_page_version,
1885
+ use_compliant_nested_type=use_compliant_nested_type,
1886
+ encryption_properties=encryption_properties,
1887
+ write_batch_size=write_batch_size,
1888
+ dictionary_pagesize_limit=dictionary_pagesize_limit,
1889
+ store_schema=store_schema,
1890
+ write_page_index=write_page_index,
1891
+ write_page_checksum=write_page_checksum,
1892
+ sorting_columns=sorting_columns,
1893
+ **kwargs) as writer:
1894
+ writer.write_table(table, row_group_size=row_group_size)
1895
+ except Exception:
1896
+ if _is_path_like(where):
1897
+ try:
1898
+ os.remove(_stringify_path(where))
1899
+ except os.error:
1900
+ pass
1901
+ raise
1902
+
1903
+
1904
+ _write_table_example = """\
1905
+ Generate an example PyArrow Table:
1906
+
1907
+ >>> import pyarrow as pa
1908
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
1909
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
1910
+ ... "Brittle stars", "Centipede"]})
1911
+
1912
+ and write the Table into Parquet file:
1913
+
1914
+ >>> import pyarrow.parquet as pq
1915
+ >>> pq.write_table(table, 'example.parquet')
1916
+
1917
+ Defining row group size for the Parquet file:
1918
+
1919
+ >>> pq.write_table(table, 'example.parquet', row_group_size=3)
1920
+
1921
+ Defining row group compression (default is Snappy):
1922
+
1923
+ >>> pq.write_table(table, 'example.parquet', compression='none')
1924
+
1925
+ Defining row group compression and encoding per-column:
1926
+
1927
+ >>> pq.write_table(table, 'example.parquet',
1928
+ ... compression={'n_legs': 'snappy', 'animal': 'gzip'},
1929
+ ... use_dictionary=['n_legs', 'animal'])
1930
+
1931
+ Defining column encoding per-column:
1932
+
1933
+ >>> pq.write_table(table, 'example.parquet',
1934
+ ... column_encoding={'animal':'PLAIN'},
1935
+ ... use_dictionary=False)
1936
+ """
1937
+
1938
+ write_table.__doc__ = """
1939
+ Write a Table to Parquet format.
1940
+
1941
+ Parameters
1942
+ ----------
1943
+ table : pyarrow.Table
1944
+ where : string or pyarrow.NativeFile
1945
+ row_group_size : int
1946
+ Maximum number of rows in each written row group. If None, the
1947
+ row group size will be the minimum of the Table size and
1948
+ 1024 * 1024.
1949
+ {}
1950
+ **kwargs : optional
1951
+ Additional options for ParquetWriter
1952
+
1953
+ Examples
1954
+ --------
1955
+ {}
1956
+ """.format(_parquet_writer_arg_docs, _write_table_example)
1957
+
1958
+
1959
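A side note on `write_table` above (illustration only, not part of core.py): because `where` may be any pyarrow.NativeFile, a full round trip can stay in memory. The names below are made up for the sketch.

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({'n_legs': [2, 4, 100]})

sink = pa.BufferOutputStream()
pq.write_table(table, sink, compression='zstd', row_group_size=2)
buf = sink.getvalue()                      # pyarrow.Buffer holding the file bytes

roundtrip = pq.read_table(pa.BufferReader(buf))
assert roundtrip.equals(table)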
+ def write_to_dataset(table, root_path, partition_cols=None,
1960
+ filesystem=None, use_legacy_dataset=None,
1961
+ schema=None, partitioning=None,
1962
+ basename_template=None, use_threads=None,
1963
+ file_visitor=None, existing_data_behavior=None,
1964
+ **kwargs):
1965
+ """Wrapper around dataset.write_dataset for writing a Table to
1966
+ Parquet format by partitions.
1967
+ For each combination of partition columns and values,
1968
+ subdirectories are created in the following
1969
+ manner:
1970
+
1971
+ root_dir/
1972
+ group1=value1
1973
+ group2=value1
1974
+ <uuid>.parquet
1975
+ group2=value2
1976
+ <uuid>.parquet
1977
+ group1=valueN
1978
+ group2=value1
1979
+ <uuid>.parquet
1980
+ group2=valueN
1981
+ <uuid>.parquet
1982
+
1983
+ Parameters
1984
+ ----------
1985
+ table : pyarrow.Table
1986
+ root_path : str, pathlib.Path
1987
+ The root directory of the dataset.
1988
+ partition_cols : list,
1989
+ Column names by which to partition the dataset.
1990
+ Columns are partitioned in the order they are given.
1991
+ filesystem : FileSystem, default None
1992
+ If nothing passed, will be inferred based on path.
1993
+ The path will first be looked up in the local on-disk filesystem; otherwise
1994
+ it will be parsed as a URI to determine the filesystem.
1995
+ use_legacy_dataset : bool, optional
1996
+ Deprecated and has no effect from PyArrow version 15.0.0.
1997
+ schema : Schema, optional
1998
+ The Schema of the dataset.
1999
+ partitioning : Partitioning or list[str], optional
2000
+ The partitioning scheme specified with the
2001
+ ``pyarrow.dataset.partitioning()`` function or a list of field names.
2002
+ When providing a list of field names, you can use
2003
+ ``partitioning_flavor`` to drive which partitioning type should be
2004
+ used.
2005
+ basename_template : str, optional
2006
+ A template string used to generate basenames of written data files.
2007
+ The token '{i}' will be replaced with an automatically incremented
2008
+ integer. If not specified, it defaults to "guid-{i}.parquet".
2009
+ use_threads : bool, default True
2010
+ Write files in parallel. If enabled, then maximum parallelism will be
2011
+ used, determined by the number of available CPU cores.
2012
+ file_visitor : function
2013
+ If set, this function will be called with a WrittenFile instance
2014
+ for each file created during the call. This object will have both
2015
+ a path attribute and a metadata attribute.
2016
+
2017
+ The path attribute will be a string containing the path to
2018
+ the created file.
2019
+
2020
+ The metadata attribute will be the parquet metadata of the file.
2021
+ This metadata will have the file path attribute set and can be used
2022
+ to build a _metadata file. The metadata attribute will be None if
2023
+ the format is not parquet.
2024
+
2025
+ Example visitor which simply collects the filenames created::
2026
+
2027
+ visited_paths = []
2028
+
2029
+ def file_visitor(written_file):
2030
+ visited_paths.append(written_file.path)
2031
+
2032
+ existing_data_behavior : 'overwrite_or_ignore' | 'error' | \
2033
+ 'delete_matching'
2034
+ Controls how the dataset will handle data that already exists in
2035
+ the destination. The default behaviour is 'overwrite_or_ignore'.
2036
+
2037
+ 'overwrite_or_ignore' will ignore any existing data and will
2038
+ overwrite files with the same name as an output file. Other
2039
+ existing files will be ignored. This behavior, in combination
2040
+ with a unique basename_template for each write, will allow for
2041
+ an append workflow.
2042
+
2043
+ 'error' will raise an error if any data exists in the destination.
2044
+
2045
+ 'delete_matching' is useful when you are writing a partitioned
2046
+ dataset. The first time each partition directory is encountered
2047
+ the entire directory will be deleted. This allows you to overwrite
2048
+ old partitions completely.
2049
+ **kwargs : dict,
2050
+ Used as additional kwargs for :func:`pyarrow.dataset.write_dataset`
2051
+ function for matching kwargs, and remainder to
2052
+ :func:`pyarrow.dataset.ParquetFileFormat.make_write_options`.
2053
+ See the docstring of :func:`write_table` and
2054
+ :func:`pyarrow.dataset.write_dataset` for the available options.
2055
+ Using `metadata_collector` in kwargs allows one to collect the
2056
+ file metadata instances of dataset pieces. The file paths in the
2057
+ ColumnChunkMetaData will be set relative to `root_path`.
2058
+
2059
+ Examples
2060
+ --------
2061
+ Generate an example PyArrow Table:
2062
+
2063
+ >>> import pyarrow as pa
2064
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
2065
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
2066
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
2067
+ ... "Brittle stars", "Centipede"]})
2068
+
2069
+ and write it to a partitioned dataset:
2070
+
2071
+ >>> import pyarrow.parquet as pq
2072
+ >>> pq.write_to_dataset(table, root_path='dataset_name_3',
2073
+ ... partition_cols=['year'])
2074
+ >>> pq.ParquetDataset('dataset_name_3').files
2075
+ ['dataset_name_3/year=2019/...-0.parquet', ...
2076
+
2077
+ Write a single Parquet file into the root folder:
2078
+
2079
+ >>> pq.write_to_dataset(table, root_path='dataset_name_4')
2080
+ >>> pq.ParquetDataset('dataset_name_4/').files
2081
+ ['dataset_name_4/...-0.parquet']
2082
+ """
2083
+ if use_legacy_dataset is not None:
2084
+ warnings.warn(
2085
+ "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
2086
+ "and will be removed in a future version.",
2087
+ FutureWarning, stacklevel=2)
2088
+
2089
+ metadata_collector = kwargs.pop('metadata_collector', None)
2090
+
2091
+ # Check for conflicting keywords
2092
+ msg_confl = (
2093
+ "The '{1}' argument is not supported. "
2094
+ "Use only '{0}' instead."
2095
+ )
2096
+ if partition_cols is not None and partitioning is not None:
2097
+ raise ValueError(msg_confl.format("partitioning",
2098
+ "partition_cols"))
2099
+
2100
+ if metadata_collector is not None and file_visitor is not None:
2101
+ raise ValueError(msg_confl.format("file_visitor",
2102
+ "metadata_collector"))
2103
+
2104
+ import pyarrow.dataset as ds
2105
+
2106
+ # extract write_dataset specific options
2107
+ # reset assumed to go to make_write_options
2108
+ write_dataset_kwargs = dict()
2109
+ for key in inspect.signature(ds.write_dataset).parameters:
2110
+ if key in kwargs:
2111
+ write_dataset_kwargs[key] = kwargs.pop(key)
2112
+ write_dataset_kwargs['max_rows_per_group'] = kwargs.pop(
2113
+ 'row_group_size', kwargs.pop("chunk_size", None)
2114
+ )
2115
+
2116
+ if metadata_collector is not None:
2117
+ def file_visitor(written_file):
2118
+ metadata_collector.append(written_file.metadata)
2119
+
2120
+ # map format arguments
2121
+ parquet_format = ds.ParquetFileFormat()
2122
+ write_options = parquet_format.make_write_options(**kwargs)
2123
+
2124
+ # map old filesystems to new one
2125
+ if filesystem is not None:
2126
+ filesystem = _ensure_filesystem(filesystem)
2127
+
2128
+ if partition_cols:
2129
+ part_schema = table.select(partition_cols).schema
2130
+ partitioning = ds.partitioning(part_schema, flavor="hive")
2131
+
2132
+ if basename_template is None:
2133
+ basename_template = guid() + '-{i}.parquet'
2134
+
2135
+ if existing_data_behavior is None:
2136
+ existing_data_behavior = 'overwrite_or_ignore'
2137
+
2138
+ ds.write_dataset(
2139
+ table, root_path, filesystem=filesystem,
2140
+ format=parquet_format, file_options=write_options, schema=schema,
2141
+ partitioning=partitioning, use_threads=use_threads,
2142
+ file_visitor=file_visitor,
2143
+ basename_template=basename_template,
2144
+ existing_data_behavior=existing_data_behavior,
2145
+ **write_dataset_kwargs)
2146
+ return
2147
+
2148
+
2149
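A sketch (illustration only, not part of core.py) of the append workflow described by the `existing_data_behavior` and `basename_template` notes above: each call gets a unique template, so 'overwrite_or_ignore' leaves earlier files untouched. The 'events_ds' directory and batch contents are assumptions.

from uuid import uuid4

import pyarrow as pa
import pyarrow.parquet as pq

for batch_id in range(3):
    batch = pa.table({'day': [1, 2], 'value': [batch_id, batch_id + 10]})
    pq.write_to_dataset(
        batch, root_path='events_ds', partition_cols=['day'],
        basename_template=f'{uuid4()}-{{i}}.parquet',
        existing_data_behavior='overwrite_or_ignore')

# 3 appends x 2 partitions -> 6 data files discovered
print(len(pq.ParquetDataset('events_ds/').files))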
+ def write_metadata(schema, where, metadata_collector=None, filesystem=None,
2150
+ **kwargs):
2151
+ """
2152
+ Write metadata-only Parquet file from schema. This can be used with
2153
+ `write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar
2154
+ files.
2155
+
2156
+ Parameters
2157
+ ----------
2158
+ schema : pyarrow.Schema
2159
+ where : string or pyarrow.NativeFile
2160
+ metadata_collector : list
2161
+ Where to collect metadata information.
2162
+ filesystem : FileSystem, default None
2163
+ If nothing passed, will be inferred from `where` if path-like, else
2164
+ `where` is already a file-like object so no filesystem is needed.
2165
+ **kwargs : dict,
2166
+ Additional kwargs for ParquetWriter class. See docstring for
2167
+ `ParquetWriter` for more information.
2168
+
2169
+ Examples
2170
+ --------
2171
+ Generate example data:
2172
+
2173
+ >>> import pyarrow as pa
2174
+ >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
2175
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
2176
+ ... "Brittle stars", "Centipede"]})
2177
+
2178
+ Write a dataset and collect metadata information.
2179
+
2180
+ >>> metadata_collector = []
2181
+ >>> import pyarrow.parquet as pq
2182
+ >>> pq.write_to_dataset(
2183
+ ... table, 'dataset_metadata',
2184
+ ... metadata_collector=metadata_collector)
2185
+
2186
+ Write the `_common_metadata` parquet file without row group statistics.
2187
+
2188
+ >>> pq.write_metadata(
2189
+ ... table.schema, 'dataset_metadata/_common_metadata')
2190
+
2191
+ Write the `_metadata` parquet file with row group statistics.
2192
+
2193
+ >>> pq.write_metadata(
2194
+ ... table.schema, 'dataset_metadata/_metadata',
2195
+ ... metadata_collector=metadata_collector)
2196
+ """
2197
+ filesystem, where = _resolve_filesystem_and_path(where, filesystem)
2198
+
2199
+ if hasattr(where, "seek"): # file-like
2200
+ cursor_position = where.tell()
2201
+
2202
+ writer = ParquetWriter(where, schema, filesystem, **kwargs)
2203
+ writer.close()
2204
+
2205
+ if metadata_collector is not None:
2206
+ # ParquetWriter doesn't expose the metadata until it's written. Write
2207
+ # it and read it again.
2208
+ metadata = read_metadata(where, filesystem=filesystem)
2209
+ if hasattr(where, "seek"):
2210
+ where.seek(cursor_position) # file-like, set cursor back.
2211
+
2212
+ for m in metadata_collector:
2213
+ metadata.append_row_groups(m)
2214
+ if filesystem is not None:
2215
+ with filesystem.open_output_stream(where) as f:
2216
+ metadata.write_metadata_file(f)
2217
+ else:
2218
+ metadata.write_metadata_file(where)
2219
+
2220
+
2221
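A follow-up sketch (illustration only, not part of core.py) on the `_metadata` sidecar written in the docstring example above: reading it back summarises every collected row group without opening the data files. The 'dataset_metadata' directory mirrors that example.

import pyarrow.parquet as pq

combined = pq.read_metadata('dataset_metadata/_metadata')
print(combined.num_row_groups)                     # one entry per collected row group
print(combined.row_group(0).column(0).file_path)   # data file path relative to the root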
+ def read_metadata(where, memory_map=False, decryption_properties=None,
2222
+ filesystem=None):
2223
+ """
2224
+ Read FileMetaData from footer of a single Parquet file.
2225
+
2226
+ Parameters
2227
+ ----------
2228
+ where : str (file path) or file-like object
2229
+ memory_map : bool, default False
2230
+ Create memory map when the source is a file path.
2231
+ decryption_properties : FileDecryptionProperties, default None
2232
+ Decryption properties for reading encrypted Parquet files.
2233
+ filesystem : FileSystem, default None
2234
+ If nothing passed, will be inferred based on path.
2235
+ The path will first be looked up in the local on-disk filesystem; otherwise
2236
+ it will be parsed as a URI to determine the filesystem.
2237
+
2238
+ Returns
2239
+ -------
2240
+ metadata : FileMetaData
2241
+ The metadata of the Parquet file
2242
+
2243
+ Examples
2244
+ --------
2245
+ >>> import pyarrow as pa
2246
+ >>> import pyarrow.parquet as pq
2247
+ >>> table = pa.table({'n_legs': [4, 5, 100],
2248
+ ... 'animal': ["Dog", "Brittle stars", "Centipede"]})
2249
+ >>> pq.write_table(table, 'example.parquet')
2250
+
2251
+ >>> pq.read_metadata('example.parquet')
2252
+ <pyarrow._parquet.FileMetaData object at ...>
2253
+ created_by: parquet-cpp-arrow version ...
2254
+ num_columns: 2
2255
+ num_rows: 3
2256
+ num_row_groups: 1
2257
+ format_version: 2.6
2258
+ serialized_size: ...
2259
+ """
2260
+ filesystem, where = _resolve_filesystem_and_path(where, filesystem)
2261
+ file_ctx = nullcontext()
2262
+ if filesystem is not None:
2263
+ file_ctx = where = filesystem.open_input_file(where)
2264
+
2265
+ with file_ctx:
2266
+ file = ParquetFile(where, memory_map=memory_map,
2267
+ decryption_properties=decryption_properties)
2268
+ return file.metadata
2269
+
2270
+
2271
+ def read_schema(where, memory_map=False, decryption_properties=None,
2272
+ filesystem=None):
2273
+ """
2274
+ Read effective Arrow schema from Parquet file metadata.
2275
+
2276
+ Parameters
2277
+ ----------
2278
+ where : str (file path) or file-like object
2279
+ memory_map : bool, default False
2280
+ Create memory map when the source is a file path.
2281
+ decryption_properties : FileDecryptionProperties, default None
2282
+ Decryption properties for reading encrypted Parquet files.
2283
+ filesystem : FileSystem, default None
2284
+ If nothing passed, will be inferred based on path.
2285
+ The path will first be looked up in the local on-disk filesystem; otherwise
2286
+ it will be parsed as a URI to determine the filesystem.
2287
+
2288
+ Returns
2289
+ -------
2290
+ schema : pyarrow.Schema
2291
+ The schema of the Parquet file
2292
+
2293
+ Examples
2294
+ --------
2295
+ >>> import pyarrow as pa
2296
+ >>> import pyarrow.parquet as pq
2297
+ >>> table = pa.table({'n_legs': [4, 5, 100],
2298
+ ... 'animal': ["Dog", "Brittle stars", "Centipede"]})
2299
+ >>> pq.write_table(table, 'example.parquet')
2300
+
2301
+ >>> pq.read_schema('example.parquet')
2302
+ n_legs: int64
2303
+ animal: string
2304
+ """
2305
+ filesystem, where = _resolve_filesystem_and_path(where, filesystem)
2306
+ file_ctx = nullcontext()
2307
+ if filesystem is not None:
2308
+ file_ctx = where = filesystem.open_input_file(where)
2309
+
2310
+ with file_ctx:
2311
+ file = ParquetFile(
2312
+ where, memory_map=memory_map,
2313
+ decryption_properties=decryption_properties)
2314
+ return file.schema.to_arrow_schema()
2315
+
2316
+
2317
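A short sketch (illustration only, not part of core.py) of inspecting a file with `read_schema` and `read_metadata` above, including per-row-group statistics; the file name is hypothetical.

import pyarrow as pa
import pyarrow.parquet as pq

pq.write_table(pa.table({'n_legs': [2, 4, 5, 100]}), 'stats.parquet',
               row_group_size=2)

print(pq.read_schema('stats.parquet'))          # Arrow schema only, no data scanned

md = pq.read_metadata('stats.parquet')
for rg in range(md.num_row_groups):
    stats = md.row_group(rg).column(0).statistics
    print(rg, stats.min, stats.max)             # min/max of n_legs per row group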
+ __all__ = (
2318
+ "ColumnChunkMetaData",
2319
+ "ColumnSchema",
2320
+ "FileDecryptionProperties",
2321
+ "FileEncryptionProperties",
2322
+ "FileMetaData",
2323
+ "ParquetDataset",
2324
+ "ParquetFile",
2325
+ "ParquetLogicalType",
2326
+ "ParquetReader",
2327
+ "ParquetSchema",
2328
+ "ParquetWriter",
2329
+ "RowGroupMetaData",
2330
+ "SortingColumn",
2331
+ "Statistics",
2332
+ "read_metadata",
2333
+ "read_pandas",
2334
+ "read_schema",
2335
+ "read_table",
2336
+ "write_metadata",
2337
+ "write_table",
2338
+ "write_to_dataset",
2339
+ "_filters_to_expression",
2340
+ "filters_to_expression",
2341
+ )
llmeval-env/lib/python3.10/site-packages/pyarrow/parquet/encryption.py ADDED
@@ -0,0 +1,23 @@
1
+ # pylint: disable=unused-wildcard-import, unused-import
2
+
3
+ # Licensed to the Apache Software Foundation (ASF) under one
4
+ # or more contributor license agreements. See the NOTICE file
5
+ # distributed with this work for additional information
6
+ # regarding copyright ownership. The ASF licenses this file
7
+ # to you under the Apache License, Version 2.0 (the
8
+ # "License"); you may not use this file except in compliance
9
+ # with the License. You may obtain a copy of the License at
10
+ #
11
+ # http://www.apache.org/licenses/LICENSE-2.0
12
+ #
13
+ # Unless required by applicable law or agreed to in writing,
14
+ # software distributed under the License is distributed on an
15
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16
+ # KIND, either express or implied. See the License for the
17
+ # specific language governing permissions and limitations
18
+ # under the License.
19
+ from pyarrow._parquet_encryption import (CryptoFactory, # noqa
20
+ EncryptionConfiguration,
21
+ DecryptionConfiguration,
22
+ KmsConnectionConfig,
23
+ KmsClient)
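A rough usage sketch for the classes re-exported by this module (illustration only, not part of encryption.py). The trivial in-memory "KMS" below is an assumption made purely for the demo; a real deployment would delegate wrap/unwrap to an actual key service.

import base64

import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.parquet.encryption as pe

class InMemoryKmsClient(pe.KmsClient):
    def __init__(self, config):
        super().__init__()
        # master keys are passed through the connection config for the demo
        self.master_keys = config.custom_kms_conf

    def wrap_key(self, key_bytes, master_key_identifier):
        # toy wrapping: base64 of (master key || data key); not secure
        master = self.master_keys[master_key_identifier].encode()
        return base64.b64encode(master + key_bytes)

    def unwrap_key(self, wrapped_key, master_key_identifier):
        master = self.master_keys[master_key_identifier].encode()
        return base64.b64decode(wrapped_key)[len(master):]

kms_config = pe.KmsConnectionConfig(
    custom_kms_conf={'footer_key': 'FOOTER_KEY_16_B_',
                     'col_key': 'COLUMN_KEY_16_B_'})
crypto_factory = pe.CryptoFactory(lambda config: InMemoryKmsClient(config))

encryption_config = pe.EncryptionConfiguration(
    footer_key='footer_key', column_keys={'col_key': ['n_legs']})

table = pa.table({'n_legs': [2, 4, 100]})
pq.write_table(
    table, 'encrypted.parquet',
    encryption_properties=crypto_factory.file_encryption_properties(
        kms_config, encryption_config))

decrypted = pq.read_table(
    'encrypted.parquet',
    decryption_properties=crypto_factory.file_decryption_properties(kms_config))
assert decrypted.equals(table)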
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/CMakeLists.txt ADDED
@@ -0,0 +1,18 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ arrow_install_all_headers("arrow/python")
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/api.h ADDED
@@ -0,0 +1,30 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/arrow_to_pandas.h"
21
+ #include "arrow/python/common.h"
22
+ #include "arrow/python/datetime.h"
23
+ #include "arrow/python/deserialize.h"
24
+ #include "arrow/python/helpers.h"
25
+ #include "arrow/python/inference.h"
26
+ #include "arrow/python/io.h"
27
+ #include "arrow/python/numpy_convert.h"
28
+ #include "arrow/python/numpy_to_arrow.h"
29
+ #include "arrow/python/python_to_arrow.h"
30
+ #include "arrow/python/serialize.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.cc ADDED
@@ -0,0 +1,2645 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for pandas conversion via NumPy
19
+
20
+ #include "arrow/python/arrow_to_pandas.h"
21
+ #include "arrow/python/numpy_interop.h" // IWYU pragma: expand
22
+
23
+ #include <cmath>
24
+ #include <cstdint>
25
+ #include <iostream>
26
+ #include <memory>
27
+ #include <mutex>
28
+ #include <string>
29
+ #include <string_view>
30
+ #include <unordered_map>
31
+ #include <utility>
32
+ #include <vector>
33
+
34
+ #include "arrow/array.h"
35
+ #include "arrow/buffer.h"
36
+ #include "arrow/datum.h"
37
+ #include "arrow/status.h"
38
+ #include "arrow/table.h"
39
+ #include "arrow/type.h"
40
+ #include "arrow/type_traits.h"
41
+ #include "arrow/util/checked_cast.h"
42
+ #include "arrow/util/hashing.h"
43
+ #include "arrow/util/int_util.h"
44
+ #include "arrow/util/logging.h"
45
+ #include "arrow/util/macros.h"
46
+ #include "arrow/util/parallel.h"
47
+ #include "arrow/visit_type_inline.h"
48
+
49
+ #include "arrow/compute/api.h"
50
+
51
+ #include "arrow/python/arrow_to_python_internal.h"
52
+ #include "arrow/python/common.h"
53
+ #include "arrow/python/datetime.h"
54
+ #include "arrow/python/decimal.h"
55
+ #include "arrow/python/helpers.h"
56
+ #include "arrow/python/numpy_convert.h"
57
+ #include "arrow/python/numpy_internal.h"
58
+ #include "arrow/python/pyarrow.h"
59
+ #include "arrow/python/python_to_arrow.h"
60
+ #include "arrow/python/type_traits.h"
61
+
62
+ namespace arrow {
63
+
64
+ class MemoryPool;
65
+
66
+ using internal::checked_cast;
67
+ using internal::CheckIndexBounds;
68
+ using internal::OptionalParallelFor;
69
+
70
+ namespace py {
71
+ namespace {
72
+
73
+ // Fix options for conversion of an inner (child) array.
74
+ PandasOptions MakeInnerOptions(PandasOptions options) {
75
+ // Make sure conversion of inner dictionary arrays always returns an array,
76
+ // not a dict {'indices': array, 'dictionary': array, 'ordered': bool}
77
+ options.decode_dictionaries = true;
78
+ options.categorical_columns.clear();
79
+ options.strings_to_categorical = false;
80
+
81
+ // In ARROW-7723, we found as a result of ARROW-3789 that second
82
+ // through microsecond resolution tz-aware timestamps were being promoted to
83
+ // use the DATETIME_NANO_TZ conversion path, yielding a datetime64[ns] NumPy
84
+ // array in this function. PyArray_GETITEM returns datetime.datetime for
85
+ // units second through microsecond but PyLong for nanosecond (because
86
+ // datetime.datetime does not support nanoseconds).
87
+ // We force the object conversion to preserve the value of the timezone.
88
+ // Nanoseconds are returned as integers.
89
+ options.coerce_temporal_nanoseconds = false;
90
+
91
+ return options;
92
+ }
93
+
94
+ // ----------------------------------------------------------------------
95
+ // PyCapsule code for setting ndarray base to reference C++ object
96
+
97
+ struct ArrayCapsule {
98
+ std::shared_ptr<Array> array;
99
+ };
100
+
101
+ struct BufferCapsule {
102
+ std::shared_ptr<Buffer> buffer;
103
+ };
104
+
105
+ void ArrayCapsule_Destructor(PyObject* capsule) {
106
+ delete reinterpret_cast<ArrayCapsule*>(PyCapsule_GetPointer(capsule, "arrow::Array"));
107
+ }
108
+
109
+ void BufferCapsule_Destructor(PyObject* capsule) {
110
+ delete reinterpret_cast<BufferCapsule*>(PyCapsule_GetPointer(capsule, "arrow::Buffer"));
111
+ }
112
+
113
+ // ----------------------------------------------------------------------
114
+ // pandas 0.x DataFrame conversion internals
115
+
116
+ using internal::arrow_traits;
117
+ using internal::npy_traits;
118
+
119
+ template <typename T>
120
+ struct WrapBytes {};
121
+
122
+ template <>
123
+ struct WrapBytes<StringType> {
124
+ static inline PyObject* Wrap(const char* data, int64_t length) {
125
+ return PyUnicode_FromStringAndSize(data, length);
126
+ }
127
+ };
128
+
129
+ template <>
130
+ struct WrapBytes<LargeStringType> {
131
+ static inline PyObject* Wrap(const char* data, int64_t length) {
132
+ return PyUnicode_FromStringAndSize(data, length);
133
+ }
134
+ };
135
+
136
+ template <>
137
+ struct WrapBytes<StringViewType> {
138
+ static inline PyObject* Wrap(const char* data, int64_t length) {
139
+ return PyUnicode_FromStringAndSize(data, length);
140
+ }
141
+ };
142
+
143
+ template <>
144
+ struct WrapBytes<BinaryType> {
145
+ static inline PyObject* Wrap(const char* data, int64_t length) {
146
+ return PyBytes_FromStringAndSize(data, length);
147
+ }
148
+ };
149
+
150
+ template <>
151
+ struct WrapBytes<LargeBinaryType> {
152
+ static inline PyObject* Wrap(const char* data, int64_t length) {
153
+ return PyBytes_FromStringAndSize(data, length);
154
+ }
155
+ };
156
+
157
+ template <>
158
+ struct WrapBytes<BinaryViewType> {
159
+ static inline PyObject* Wrap(const char* data, int64_t length) {
160
+ return PyBytes_FromStringAndSize(data, length);
161
+ }
162
+ };
163
+
164
+ template <>
165
+ struct WrapBytes<FixedSizeBinaryType> {
166
+ static inline PyObject* Wrap(const char* data, int64_t length) {
167
+ return PyBytes_FromStringAndSize(data, length);
168
+ }
169
+ };
170
+
171
+ static inline bool ListTypeSupported(const DataType& type) {
172
+ switch (type.id()) {
173
+ case Type::BOOL:
174
+ case Type::UINT8:
175
+ case Type::INT8:
176
+ case Type::UINT16:
177
+ case Type::INT16:
178
+ case Type::UINT32:
179
+ case Type::INT32:
180
+ case Type::INT64:
181
+ case Type::UINT64:
182
+ case Type::HALF_FLOAT:
183
+ case Type::FLOAT:
184
+ case Type::DOUBLE:
185
+ case Type::DECIMAL128:
186
+ case Type::DECIMAL256:
187
+ case Type::BINARY:
188
+ case Type::LARGE_BINARY:
189
+ case Type::STRING:
190
+ case Type::LARGE_STRING:
191
+ case Type::DATE32:
192
+ case Type::DATE64:
193
+ case Type::STRUCT:
194
+ case Type::MAP:
195
+ case Type::TIME32:
196
+ case Type::TIME64:
197
+ case Type::TIMESTAMP:
198
+ case Type::DURATION:
199
+ case Type::DICTIONARY:
200
+ case Type::INTERVAL_MONTH_DAY_NANO:
201
+ case Type::NA: // empty list
202
+ // The above types are all supported.
203
+ return true;
204
+ case Type::FIXED_SIZE_LIST:
205
+ case Type::LIST:
206
+ case Type::LARGE_LIST:
207
+ case Type::LIST_VIEW:
208
+ case Type::LARGE_LIST_VIEW: {
209
+ const auto& list_type = checked_cast<const BaseListType&>(type);
210
+ return ListTypeSupported(*list_type.value_type());
211
+ }
212
+ case Type::EXTENSION: {
213
+ const auto& ext = checked_cast<const ExtensionType&>(*type.GetSharedPtr());
214
+ return ListTypeSupported(*(ext.storage_type()));
215
+ }
216
+ default:
217
+ break;
218
+ }
219
+ return false;
220
+ }
221
+
222
+ Status CapsulizeArray(const std::shared_ptr<Array>& arr, PyObject** out) {
223
+ auto capsule = new ArrayCapsule{{arr}};
224
+ *out = PyCapsule_New(reinterpret_cast<void*>(capsule), "arrow::Array",
225
+ &ArrayCapsule_Destructor);
226
+ if (*out == nullptr) {
227
+ delete capsule;
228
+ RETURN_IF_PYERROR();
229
+ }
230
+ return Status::OK();
231
+ }
232
+
233
+ Status CapsulizeBuffer(const std::shared_ptr<Buffer>& buffer, PyObject** out) {
234
+ auto capsule = new BufferCapsule{{buffer}};
235
+ *out = PyCapsule_New(reinterpret_cast<void*>(capsule), "arrow::Buffer",
236
+ &BufferCapsule_Destructor);
237
+ if (*out == nullptr) {
238
+ delete capsule;
239
+ RETURN_IF_PYERROR();
240
+ }
241
+ return Status::OK();
242
+ }
243
+
244
+ Status SetNdarrayBase(PyArrayObject* arr, PyObject* base) {
245
+ if (PyArray_SetBaseObject(arr, base) == -1) {
246
+ // Error occurred, trust that SetBaseObject sets the error state
247
+ Py_XDECREF(base);
248
+ RETURN_IF_PYERROR();
249
+ }
250
+ return Status::OK();
251
+ }
252
+
253
+ Status SetBufferBase(PyArrayObject* arr, const std::shared_ptr<Buffer>& buffer) {
254
+ PyObject* base;
255
+ RETURN_NOT_OK(CapsulizeBuffer(buffer, &base));
256
+ return SetNdarrayBase(arr, base);
257
+ }
258
+
259
+ inline void set_numpy_metadata(int type, const DataType* datatype, PyArray_Descr* out) {
260
+ auto metadata =
261
+ reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(PyDataType_C_METADATA(out));
262
+ if (type == NPY_DATETIME) {
263
+ if (datatype->id() == Type::TIMESTAMP) {
264
+ const auto& timestamp_type = checked_cast<const TimestampType&>(*datatype);
265
+ metadata->meta.base = internal::NumPyFrequency(timestamp_type.unit());
266
+ } else {
267
+ DCHECK(false) << "NPY_DATETIME views only supported for Arrow TIMESTAMP types";
268
+ }
269
+ } else if (type == NPY_TIMEDELTA) {
270
+ DCHECK_EQ(datatype->id(), Type::DURATION);
271
+ const auto& duration_type = checked_cast<const DurationType&>(*datatype);
272
+ metadata->meta.base = internal::NumPyFrequency(duration_type.unit());
273
+ }
274
+ }
275
+
276
+ Status PyArray_NewFromPool(int nd, npy_intp* dims, PyArray_Descr* descr, MemoryPool* pool,
277
+ PyObject** out) {
278
+ // ARROW-6570: Allocate memory from MemoryPool for a couple reasons
279
+ //
280
+ // * Track allocations
281
+ // * Get better performance through custom allocators
282
+ int64_t total_size = PyDataType_ELSIZE(descr);
283
+ for (int i = 0; i < nd; ++i) {
284
+ total_size *= dims[i];
285
+ }
286
+
287
+ ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(total_size, pool));
288
+ *out = PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims,
289
+ /*strides=*/nullptr,
290
+ /*data=*/buffer->mutable_data(),
291
+ /*flags=*/NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEABLE,
292
+ /*obj=*/nullptr);
293
+ if (*out == nullptr) {
294
+ RETURN_IF_PYERROR();
295
+ // Trust that error set if NULL returned
296
+ }
297
+ return SetBufferBase(reinterpret_cast<PyArrayObject*>(*out), std::move(buffer));
298
+ }
299
+
300
+ template <typename T = void>
301
+ inline const T* GetPrimitiveValues(const Array& arr) {
302
+ if (arr.length() == 0) {
303
+ return nullptr;
304
+ }
305
+ const int elsize = arr.type()->byte_width();
306
+ const auto& prim_arr = checked_cast<const PrimitiveArray&>(arr);
307
+ return reinterpret_cast<const T*>(prim_arr.values()->data() + arr.offset() * elsize);
308
+ }
309
+
310
+ Status MakeNumPyView(std::shared_ptr<Array> arr, PyObject* py_ref, int npy_type, int ndim,
311
+ npy_intp* dims, PyObject** out) {
312
+ PyAcquireGIL lock;
313
+
314
+ PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type);
315
+ set_numpy_metadata(npy_type, arr->type().get(), descr);
316
+ PyObject* result = PyArray_NewFromDescr(
317
+ &PyArray_Type, descr, ndim, dims, /*strides=*/nullptr,
318
+ const_cast<void*>(GetPrimitiveValues(*arr)), /*flags=*/0, nullptr);
319
+ PyArrayObject* np_arr = reinterpret_cast<PyArrayObject*>(result);
320
+ if (np_arr == nullptr) {
321
+ // Error occurred, trust that error set
322
+ return Status::OK();
323
+ }
324
+
325
+ PyObject* base;
326
+ if (py_ref == nullptr) {
327
+ // Capsule will be owned by the ndarray, no incref necessary. See
328
+ // ARROW-1973
329
+ RETURN_NOT_OK(CapsulizeArray(arr, &base));
330
+ } else {
331
+ Py_INCREF(py_ref);
332
+ base = py_ref;
333
+ }
334
+ RETURN_NOT_OK(SetNdarrayBase(np_arr, base));
335
+
336
+ // Do not allow Arrow data to be mutated
337
+ PyArray_CLEARFLAGS(np_arr, NPY_ARRAY_WRITEABLE);
338
+ *out = result;
339
+ return Status::OK();
340
+ }
341
+
342
+ class PandasWriter {
343
+ public:
344
+ enum type {
345
+ OBJECT,
346
+ UINT8,
347
+ INT8,
348
+ UINT16,
349
+ INT16,
350
+ UINT32,
351
+ INT32,
352
+ UINT64,
353
+ INT64,
354
+ HALF_FLOAT,
355
+ FLOAT,
356
+ DOUBLE,
357
+ BOOL,
358
+ DATETIME_DAY,
359
+ DATETIME_SECOND,
360
+ DATETIME_MILLI,
361
+ DATETIME_MICRO,
362
+ DATETIME_NANO,
363
+ DATETIME_SECOND_TZ,
364
+ DATETIME_MILLI_TZ,
365
+ DATETIME_MICRO_TZ,
366
+ DATETIME_NANO_TZ,
367
+ TIMEDELTA_SECOND,
368
+ TIMEDELTA_MILLI,
369
+ TIMEDELTA_MICRO,
370
+ TIMEDELTA_NANO,
371
+ CATEGORICAL,
372
+ EXTENSION
373
+ };
374
+
375
+ PandasWriter(const PandasOptions& options, int64_t num_rows, int num_columns)
376
+ : options_(options), num_rows_(num_rows), num_columns_(num_columns) {
377
+ PyAcquireGIL lock;
378
+ internal::InitPandasStaticData();
379
+ }
380
+ virtual ~PandasWriter() {}
381
+
382
+ void SetBlockData(PyObject* arr) {
383
+ block_arr_.reset(arr);
384
+ block_data_ =
385
+ reinterpret_cast<uint8_t*>(PyArray_DATA(reinterpret_cast<PyArrayObject*>(arr)));
386
+ }
387
+
388
+ /// \brief Either copy or wrap single array to create pandas-compatible array
389
+ /// for Series or DataFrame. num_columns_ can only be 1. Will try to zero
390
+ /// copy if possible (or error if not possible and zero_copy_only=True)
391
+ virtual Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) = 0;
392
+
393
+ /// \brief Copy ChunkedArray into a multi-column block
394
+ virtual Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) = 0;
395
+
396
+ Status EnsurePlacementAllocated() {
397
+ std::lock_guard<std::mutex> guard(allocation_lock_);
398
+ if (placement_data_ != nullptr) {
399
+ return Status::OK();
400
+ }
401
+ PyAcquireGIL lock;
402
+ npy_intp placement_dims[1] = {num_columns_};
403
+ PyObject* placement_arr = PyArray_SimpleNew(1, placement_dims, NPY_INT64);
404
+ RETURN_IF_PYERROR();
405
+ placement_arr_.reset(placement_arr);
406
+ placement_data_ = reinterpret_cast<int64_t*>(
407
+ PyArray_DATA(reinterpret_cast<PyArrayObject*>(placement_arr)));
408
+ return Status::OK();
409
+ }
410
+
411
+ Status EnsureAllocated() {
412
+ std::lock_guard<std::mutex> guard(allocation_lock_);
413
+ if (block_data_ != nullptr) {
414
+ return Status::OK();
415
+ }
416
+ RETURN_NOT_OK(Allocate());
417
+ return Status::OK();
418
+ }
419
+
420
+ virtual bool CanZeroCopy(const ChunkedArray& data) const { return false; }
421
+
422
+ virtual Status Write(std::shared_ptr<ChunkedArray> data, int64_t abs_placement,
423
+ int64_t rel_placement) {
424
+ RETURN_NOT_OK(EnsurePlacementAllocated());
425
+ if (num_columns_ == 1 && options_.allow_zero_copy_blocks) {
426
+ RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr));
427
+ } else {
428
+ RETURN_NOT_OK(
429
+ CheckNoZeroCopy("Cannot do zero copy conversion into "
430
+ "multi-column DataFrame block"));
431
+ RETURN_NOT_OK(EnsureAllocated());
432
+ RETURN_NOT_OK(CopyInto(data, rel_placement));
433
+ }
434
+ placement_data_[rel_placement] = abs_placement;
435
+ return Status::OK();
436
+ }
437
+
438
+ virtual Status GetDataFrameResult(PyObject** out) {
439
+ PyObject* result = PyDict_New();
440
+ RETURN_IF_PYERROR();
441
+
442
+ PyObject* block;
443
+ RETURN_NOT_OK(GetResultBlock(&block));
444
+
445
+ PyDict_SetItemString(result, "block", block);
446
+ PyDict_SetItemString(result, "placement", placement_arr_.obj());
447
+
448
+ RETURN_NOT_OK(AddResultMetadata(result));
449
+ *out = result;
450
+ return Status::OK();
451
+ }
452
+
453
+ // Caller steals the reference to this object
454
+ virtual Status GetSeriesResult(PyObject** out) {
455
+ RETURN_NOT_OK(MakeBlock1D());
456
+ // Caller owns the object now
457
+ *out = block_arr_.detach();
458
+ return Status::OK();
459
+ }
460
+
461
+ protected:
462
+ virtual Status AddResultMetadata(PyObject* result) { return Status::OK(); }
463
+
464
+ Status MakeBlock1D() {
465
+ // For Series or for certain DataFrame block types, we need to reshape to a
466
+ // 1D array when there is only one column
467
+ PyAcquireGIL lock;
468
+
469
+ DCHECK_EQ(1, num_columns_);
470
+
471
+ npy_intp new_dims[1] = {static_cast<npy_intp>(num_rows_)};
472
+ PyArray_Dims dims;
473
+ dims.ptr = new_dims;
474
+ dims.len = 1;
475
+
476
+ PyObject* reshaped = PyArray_Newshape(
477
+ reinterpret_cast<PyArrayObject*>(block_arr_.obj()), &dims, NPY_ANYORDER);
478
+ RETURN_IF_PYERROR();
479
+
480
+ // ARROW-8801: Here a PyArrayObject is created that is not being managed by
481
+ // any OwnedRef object. This object is then put in the resulting object
482
+ // with PyDict_SetItemString, which increments the reference count, so a
483
+ // memory leak ensues. There are several ways to fix the memory leak but a
484
+ // simple one is to put the reshaped 1D block array in this OwnedRefNoGIL
485
+ // so it will be correctly decref'd when this class is destructed.
486
+ block_arr_.reset(reshaped);
487
+ return Status::OK();
488
+ }
489
+
490
+ virtual Status GetResultBlock(PyObject** out) {
491
+ *out = block_arr_.obj();
492
+ return Status::OK();
493
+ }
494
+
495
+ Status CheckNoZeroCopy(const std::string& message) {
496
+ if (options_.zero_copy_only) {
497
+ return Status::Invalid(message);
498
+ }
499
+ return Status::OK();
500
+ }
501
+
502
+ Status CheckNotZeroCopyOnly(const ChunkedArray& data) {
503
+ if (options_.zero_copy_only) {
504
+ return Status::Invalid("Needed to copy ", data.num_chunks(), " chunks with ",
505
+ data.null_count(), " nulls, but zero_copy_only was True");
506
+ }
507
+ return Status::OK();
508
+ }
509
+
510
+ virtual Status Allocate() {
511
+ return Status::NotImplemented("Override Allocate in subclasses");
512
+ }
513
+
514
+ Status AllocateNDArray(int npy_type, int ndim = 2) {
515
+ PyAcquireGIL lock;
516
+
517
+ PyObject* block_arr = nullptr;
518
+ npy_intp block_dims[2] = {0, 0};
519
+
520
+ if (ndim == 2) {
521
+ block_dims[0] = num_columns_;
522
+ block_dims[1] = num_rows_;
523
+ } else {
524
+ block_dims[0] = num_rows_;
525
+ }
526
+ PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type);
527
+ if (PyDataType_REFCHK(descr)) {
528
+ // ARROW-6876: if the array has refcounted items, let Numpy
529
+ // own the array memory so as to decref elements on array destruction
530
+ block_arr = PyArray_SimpleNewFromDescr(ndim, block_dims, descr);
531
+ RETURN_IF_PYERROR();
532
+ } else {
533
+ RETURN_NOT_OK(
534
+ PyArray_NewFromPool(ndim, block_dims, descr, options_.pool, &block_arr));
535
+ }
536
+
537
+ SetBlockData(block_arr);
538
+ return Status::OK();
539
+ }
540
+
541
+ void SetDatetimeUnit(NPY_DATETIMEUNIT unit) {
542
+ PyAcquireGIL lock;
543
+ auto date_dtype =
544
+ reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(PyDataType_C_METADATA(
545
+ PyArray_DESCR(reinterpret_cast<PyArrayObject*>(block_arr_.obj()))));
546
+ date_dtype->meta.base = unit;
547
+ }
548
+
549
+ PandasOptions options_;
550
+
551
+ std::mutex allocation_lock_;
552
+
553
+ int64_t num_rows_;
554
+ int num_columns_;
555
+
556
+ OwnedRefNoGIL block_arr_;
557
+ uint8_t* block_data_ = nullptr;
558
+
559
+ // ndarray<int64>
560
+ OwnedRefNoGIL placement_arr_;
561
+ int64_t* placement_data_ = nullptr;
562
+
563
+ private:
564
+ ARROW_DISALLOW_COPY_AND_ASSIGN(PandasWriter);
565
+ };
566
+
567
+ template <typename InType, typename OutType>
568
+ inline void ConvertIntegerWithNulls(const PandasOptions& options,
569
+ const ChunkedArray& data, OutType* out_values) {
570
+ for (int c = 0; c < data.num_chunks(); c++) {
571
+ const auto& arr = *data.chunk(c);
572
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
573
+ // Upcast to double, set NaN as appropriate
574
+
575
+ for (int i = 0; i < arr.length(); ++i) {
576
+ *out_values++ =
577
+ arr.IsNull(i) ? static_cast<OutType>(NAN) : static_cast<OutType>(in_values[i]);
578
+ }
579
+ }
580
+ }
581
+
582
+ template <typename T>
583
+ inline void ConvertIntegerNoNullsSameType(const PandasOptions& options,
584
+ const ChunkedArray& data, T* out_values) {
585
+ for (int c = 0; c < data.num_chunks(); c++) {
586
+ const auto& arr = *data.chunk(c);
587
+ if (arr.length() > 0) {
588
+ const T* in_values = GetPrimitiveValues<T>(arr);
589
+ memcpy(out_values, in_values, sizeof(T) * arr.length());
590
+ out_values += arr.length();
591
+ }
592
+ }
593
+ }
594
+
595
+ template <typename InType, typename OutType>
596
+ inline void ConvertIntegerNoNullsCast(const PandasOptions& options,
597
+ const ChunkedArray& data, OutType* out_values) {
598
+ for (int c = 0; c < data.num_chunks(); c++) {
599
+ const auto& arr = *data.chunk(c);
600
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
601
+ for (int64_t i = 0; i < arr.length(); ++i) {
602
+ *out_values++ = in_values[i];
603
+ }
604
+ }
605
+ }
606
+
607
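+ // Note (added comment): by default, values are memoized by their primitive C type.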
+ template <typename T, typename Enable = void>
608
+ struct MemoizationTraits {
609
+ using Scalar = typename T::c_type;
610
+ };
611
+
612
+ template <typename T>
613
+ struct MemoizationTraits<T, enable_if_has_string_view<T>> {
614
+ // For binary, we memoize string_view as a scalar value to avoid having to
615
+ // unnecessarily copy the memory into the memo table data structure
616
+ using Scalar = std::string_view;
617
+ };
618
+
619
+ // Generic Array -> PyObject** converter that handles object deduplication, if
620
+ // requested
621
+ template <typename Type, typename WrapFunction>
622
+ inline Status ConvertAsPyObjects(const PandasOptions& options, const ChunkedArray& data,
623
+ WrapFunction&& wrap_func, PyObject** out_values) {
624
+ using ArrayType = typename TypeTraits<Type>::ArrayType;
625
+ using Scalar = typename MemoizationTraits<Type>::Scalar;
626
+
627
+ auto convert_chunks = [&](auto&& wrap_func) -> Status {
628
+ for (int c = 0; c < data.num_chunks(); c++) {
629
+ const auto& arr = arrow::internal::checked_cast<const ArrayType&>(*data.chunk(c));
630
+ RETURN_NOT_OK(internal::WriteArrayObjects(arr, wrap_func, out_values));
631
+ out_values += arr.length();
632
+ }
633
+ return Status::OK();
634
+ };
635
+
636
+ if (options.deduplicate_objects) {
637
+ // GH-40316: only allocate a memo table if deduplication is enabled.
638
+ ::arrow::internal::ScalarMemoTable<Scalar> memo_table(options.pool);
639
+ std::vector<PyObject*> unique_values;
640
+ int32_t memo_size = 0;
641
+
642
+ auto WrapMemoized = [&](const Scalar& value, PyObject** out_values) {
643
+ int32_t memo_index;
644
+ RETURN_NOT_OK(memo_table.GetOrInsert(value, &memo_index));
645
+ if (memo_index == memo_size) {
646
+ // New entry
647
+ RETURN_NOT_OK(wrap_func(value, out_values));
648
+ unique_values.push_back(*out_values);
649
+ ++memo_size;
650
+ } else {
651
+ // Duplicate entry
652
+ Py_INCREF(unique_values[memo_index]);
653
+ *out_values = unique_values[memo_index];
654
+ }
655
+ return Status::OK();
656
+ };
657
+ return convert_chunks(std::move(WrapMemoized));
658
+ } else {
659
+ return convert_chunks(std::forward<WrapFunction>(wrap_func));
660
+ }
661
+ }
662
+
663
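+ // Note (added comment): converts a struct column to one Python dict per row
+ // (None for null rows), assembled from the already-converted child arrays.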
+ Status ConvertStruct(PandasOptions options, const ChunkedArray& data,
664
+ PyObject** out_values) {
665
+ if (data.num_chunks() == 0) {
666
+ return Status::OK();
667
+ }
668
+ // ChunkedArray has at least one chunk
669
+ auto arr = checked_cast<const StructArray*>(data.chunk(0).get());
670
+ // Use it to cache the struct type and number of fields for all chunks
671
+ int32_t num_fields = arr->num_fields();
672
+ auto array_type = arr->type();
673
+ std::vector<OwnedRef> fields_data(num_fields * data.num_chunks());
674
+ OwnedRef dict_item;
675
+
676
+ // See notes in MakeInnerOptions.
677
+ options = MakeInnerOptions(std::move(options));
678
+ // Don't blindly convert because timestamps in lists are handled differently.
679
+ options.timestamp_as_object = true;
680
+
681
+ for (int c = 0; c < data.num_chunks(); c++) {
682
+ auto fields_data_offset = c * num_fields;
683
+ auto arr = checked_cast<const StructArray*>(data.chunk(c).get());
684
+ // Convert the struct arrays first
685
+ for (int32_t i = 0; i < num_fields; i++) {
686
+ auto field = arr->field(static_cast<int>(i));
687
+ // In case the field is an extension array, use .storage() to convert to Pandas
688
+ if (field->type()->id() == Type::EXTENSION) {
689
+ const ExtensionArray& arr_ext = checked_cast<const ExtensionArray&>(*field);
690
+ field = arr_ext.storage();
691
+ }
692
+ RETURN_NOT_OK(ConvertArrayToPandas(options, field, nullptr,
693
+ fields_data[i + fields_data_offset].ref()));
694
+ DCHECK(PyArray_Check(fields_data[i + fields_data_offset].obj()));
695
+ }
696
+
697
+ // Construct a dictionary for each row
698
+ const bool has_nulls = data.null_count() > 0;
699
+ for (int64_t i = 0; i < arr->length(); ++i) {
700
+ if (has_nulls && arr->IsNull(i)) {
701
+ Py_INCREF(Py_None);
702
+ *out_values = Py_None;
703
+ } else {
704
+ // Build the new dict object for the row
705
+ dict_item.reset(PyDict_New());
706
+ RETURN_IF_PYERROR();
707
+ for (int32_t field_idx = 0; field_idx < num_fields; ++field_idx) {
708
+ OwnedRef field_value;
709
+ auto name = array_type->field(static_cast<int>(field_idx))->name();
710
+ if (!arr->field(static_cast<int>(field_idx))->IsNull(i)) {
711
+ // Value exists in child array, obtain it
712
+ auto array = reinterpret_cast<PyArrayObject*>(
713
+ fields_data[field_idx + fields_data_offset].obj());
714
+ auto ptr = reinterpret_cast<const char*>(PyArray_GETPTR1(array, i));
715
+ field_value.reset(PyArray_GETITEM(array, ptr));
716
+ RETURN_IF_PYERROR();
717
+ } else {
718
+ // Translate the Null to a None
719
+ Py_INCREF(Py_None);
720
+ field_value.reset(Py_None);
721
+ }
722
+ // PyDict_SetItemString increments reference count
723
+ auto setitem_result =
724
+ PyDict_SetItemString(dict_item.obj(), name.c_str(), field_value.obj());
725
+ RETURN_IF_PYERROR();
726
+ DCHECK_EQ(setitem_result, 0);
727
+ }
728
+ *out_values = dict_item.obj();
729
+ // Grant ownership to the resulting array
730
+ Py_INCREF(*out_values);
731
+ }
732
+ ++out_values;
733
+ }
734
+ }
735
+ return Status::OK();
736
+ }
737
+
738
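+ // Note (added comment): casts each dictionary-encoded chunk to its dense value type.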
+ Status DecodeDictionaries(MemoryPool* pool, const std::shared_ptr<DataType>& dense_type,
739
+ ArrayVector* arrays) {
740
+ compute::ExecContext ctx(pool);
741
+ compute::CastOptions options;
742
+ for (size_t i = 0; i < arrays->size(); ++i) {
743
+ ARROW_ASSIGN_OR_RAISE((*arrays)[i],
744
+ compute::Cast(*(*arrays)[i], dense_type, options, &ctx));
745
+ }
746
+ return Status::OK();
747
+ }
748
+
749
+ Status DecodeDictionaries(MemoryPool* pool, const std::shared_ptr<DataType>& dense_type,
750
+ std::shared_ptr<ChunkedArray>* array) {
751
+ auto chunks = (*array)->chunks();
752
+ RETURN_NOT_OK(DecodeDictionaries(pool, dense_type, &chunks));
753
+ *array = std::make_shared<ChunkedArray>(std::move(chunks), dense_type);
754
+ return Status::OK();
755
+ }
756
+
757
+ template <typename T>
758
+ enable_if_list_like<T, Status> ConvertListsLike(PandasOptions options,
759
+ const ChunkedArray& data,
760
+ PyObject** out_values) {
761
+ using ListArrayT = typename TypeTraits<T>::ArrayType;
762
+ // Get column of underlying value arrays
763
+ ArrayVector value_arrays;
764
+ for (int c = 0; c < data.num_chunks(); c++) {
765
+ const auto& arr = checked_cast<const ListArrayT&>(*data.chunk(c));
766
+ // values() does not account for offsets, so we need to slice into it.
767
+ // We can't use Flatten(), because it removes the values behind a null list
768
+ // value, and that makes the offsets into original list values and our
769
+ // flattened_values array different.
770
+ std::shared_ptr<Array> flattened_values = arr.values()->Slice(
771
+ arr.value_offset(0), arr.value_offset(arr.length()) - arr.value_offset(0));
772
+ if (arr.value_type()->id() == Type::EXTENSION) {
773
+ const auto& arr_ext = checked_cast<const ExtensionArray&>(*flattened_values);
774
+ value_arrays.emplace_back(arr_ext.storage());
775
+ } else {
776
+ value_arrays.emplace_back(flattened_values);
777
+ }
778
+ }
779
+
780
+ using ListArrayType = typename ListArrayT::TypeClass;
781
+ const auto& list_type = checked_cast<const ListArrayType&>(*data.type());
782
+ auto value_type = list_type.value_type();
783
+ if (value_type->id() == Type::EXTENSION) {
784
+ value_type = checked_cast<const ExtensionType&>(*value_type).storage_type();
785
+ }
786
+
787
+ auto flat_column = std::make_shared<ChunkedArray>(value_arrays, value_type);
788
+
789
+ options = MakeInnerOptions(std::move(options));
790
+
791
+ OwnedRefNoGIL owned_numpy_array;
792
+ RETURN_NOT_OK(ConvertChunkedArrayToPandas(options, flat_column, nullptr,
793
+ owned_numpy_array.ref()));
794
+ PyObject* numpy_array = owned_numpy_array.obj();
795
+ DCHECK(PyArray_Check(numpy_array));
796
+
797
+ int64_t chunk_offset = 0;
798
+ for (int c = 0; c < data.num_chunks(); c++) {
799
+ const auto& arr = checked_cast<const ListArrayT&>(*data.chunk(c));
800
+ const bool has_nulls = data.null_count() > 0;
801
+ for (int64_t i = 0; i < arr.length(); ++i) {
802
+ if (has_nulls && arr.IsNull(i)) {
803
+ Py_INCREF(Py_None);
804
+ *out_values = Py_None;
805
+ } else {
806
+ // Need to subtract value_offset(0) since the original chunk might be a slice
807
+ // into another array.
808
+ OwnedRef start(PyLong_FromLongLong(arr.value_offset(i) + chunk_offset -
809
+ arr.value_offset(0)));
810
+ OwnedRef end(PyLong_FromLongLong(arr.value_offset(i + 1) + chunk_offset -
811
+ arr.value_offset(0)));
812
+ OwnedRef slice(PySlice_New(start.obj(), end.obj(), nullptr));
813
+
814
+ if (ARROW_PREDICT_FALSE(slice.obj() == nullptr)) {
815
+ // Fall out of loop, will return from RETURN_IF_PYERROR
816
+ break;
817
+ }
818
+ *out_values = PyObject_GetItem(numpy_array, slice.obj());
819
+
820
+ if (*out_values == nullptr) {
821
+ // Fall out of loop, will return from RETURN_IF_PYERROR
822
+ break;
823
+ }
824
+ }
825
+ ++out_values;
826
+ }
827
+ RETURN_IF_PYERROR();
828
+
829
+ chunk_offset += arr.value_offset(arr.length()) - arr.value_offset(0);
830
+ }
831
+
832
+ return Status::OK();
833
+ }
834
+
835
+ // TODO GH-40579: optimize ListView conversion to avoid unnecessary copies
836
+ template <typename T>
837
+ enable_if_list_view<T, Status> ConvertListsLike(PandasOptions options,
838
+ const ChunkedArray& data,
839
+ PyObject** out_values) {
840
+ using ListViewArrayType = typename TypeTraits<T>::ArrayType;
841
+ using NonViewType =
842
+ std::conditional_t<T::type_id == Type::LIST_VIEW, ListType, LargeListType>;
843
+ using NonViewClass = typename TypeTraits<NonViewType>::ArrayType;
844
+ ArrayVector list_arrays;
845
+ for (int c = 0; c < data.num_chunks(); c++) {
846
+ const auto& arr = checked_cast<const ListViewArrayType&>(*data.chunk(c));
847
+ ARROW_ASSIGN_OR_RAISE(auto non_view_array,
848
+ NonViewClass::FromListView(arr, options.pool));
849
+ list_arrays.emplace_back(non_view_array);
850
+ }
851
+ auto chunked_array = std::make_shared<ChunkedArray>(list_arrays);
852
+ return ConvertListsLike<NonViewType>(options, *chunked_array, out_values);
853
+ }
854
+
855
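+ // Note (added comment): shared driver for map conversion. resetRow starts a new
+ // row container, addPairToRow appends one key/value pair, and stealRow releases
+ // the finished row object into the output array.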
+ template <typename F1, typename F2, typename F3>
856
+ Status ConvertMapHelper(F1 resetRow, F2 addPairToRow, F3 stealRow,
857
+ const ChunkedArray& data, PyArrayObject* py_keys,
858
+ PyArrayObject* py_items,
859
+ // needed for null checks in items
860
+ const std::vector<std::shared_ptr<Array>> item_arrays,
861
+ PyObject** out_values) {
862
+ OwnedRef key_value;
863
+ OwnedRef item_value;
864
+
865
+ int64_t chunk_offset = 0;
866
+ for (int c = 0; c < data.num_chunks(); ++c) {
867
+ const auto& arr = checked_cast<const MapArray&>(*data.chunk(c));
868
+ const bool has_nulls = data.null_count() > 0;
869
+
870
+ // Make a list of key/item pairs for each row in array
871
+ for (int64_t i = 0; i < arr.length(); ++i) {
872
+ if (has_nulls && arr.IsNull(i)) {
873
+ Py_INCREF(Py_None);
874
+ *out_values = Py_None;
875
+ } else {
876
+ int64_t entry_offset = arr.value_offset(i);
877
+ int64_t num_pairs = arr.value_offset(i + 1) - entry_offset;
878
+
879
+ // Build the new list object for the row of Python pairs
880
+ RETURN_NOT_OK(resetRow(num_pairs));
881
+
882
+ // Add each key/item pair in the row
883
+ for (int64_t j = 0; j < num_pairs; ++j) {
884
+ // Get key value, key is non-nullable for a valid row
885
+ auto ptr_key = reinterpret_cast<const char*>(
886
+ PyArray_GETPTR1(py_keys, chunk_offset + entry_offset + j));
887
+ key_value.reset(PyArray_GETITEM(py_keys, ptr_key));
888
+ RETURN_IF_PYERROR();
889
+
890
+ if (item_arrays[c]->IsNull(entry_offset + j)) {
891
+ // Translate the Null to a None
892
+ Py_INCREF(Py_None);
893
+ item_value.reset(Py_None);
894
+ } else {
895
+ // Get valid value from item array
896
+ auto ptr_item = reinterpret_cast<const char*>(
897
+ PyArray_GETPTR1(py_items, chunk_offset + entry_offset + j));
898
+ item_value.reset(PyArray_GETITEM(py_items, ptr_item));
899
+ RETURN_IF_PYERROR();
900
+ }
901
+
902
+ // Add the key/item pair to the row
903
+ RETURN_NOT_OK(addPairToRow(j, key_value, item_value));
904
+ }
905
+
906
+ // Pass ownership to the resulting array
907
+ *out_values = stealRow();
908
+ }
909
+ ++out_values;
910
+ }
911
+ RETURN_IF_PYERROR();
912
+
913
+ chunk_offset += arr.values()->length();
914
+ }
915
+
916
+ return Status::OK();
917
+ }
918
+
919
+ // A more helpful error message around TypeErrors that may stem from unhashable keys
920
+ Status CheckMapAsPydictsTypeError() {
921
+ if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
922
+ return Status::OK();
923
+ }
924
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
925
+ // Modify the error string directly, so it is re-raised
926
+ // with our additional info.
927
+ //
928
+ // There are not many interesting things happening when this
929
+ // is hit. This is intended to only be called directly after
930
+ // PyDict_SetItem, where a finite set of errors could occur.
931
+ PyObject *type, *value, *traceback;
932
+ PyErr_Fetch(&type, &value, &traceback);
933
+ std::string message;
934
+ RETURN_NOT_OK(internal::PyObject_StdStringStr(value, &message));
935
+ message +=
936
+ ". If keys are not hashable, then you must use the option "
937
+ "[maps_as_pydicts=None (default)]";
938
+
939
+ // resets the error
940
+ PyErr_SetString(PyExc_TypeError, message.c_str());
941
+ }
942
+ return ConvertPyError();
943
+ }
944
+
945
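+ // Note (added comment): duplicate keys collapse when building pydicts, so a total
+ // dict length smaller than the raw pair count signals data loss.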
+ Status CheckForDuplicateKeys(bool error_on_duplicate_keys, Py_ssize_t total_dict_len,
946
+ Py_ssize_t total_raw_len) {
947
+ if (total_dict_len < total_raw_len) {
948
+ const char* message =
949
+ "[maps_as_pydicts] "
950
+ "After conversion of Arrow maps to pydicts, "
951
+ "detected data loss due to duplicate keys. "
952
+ "Original input length is [%lld], total converted pydict length is [%lld].";
953
+ std::array<char, 256> buf;
954
+ std::snprintf(buf.data(), buf.size(), message, static_cast<long long>(total_raw_len), static_cast<long long>(total_dict_len));
955
+
956
+ if (error_on_duplicate_keys) {
957
+ return Status::UnknownError(buf.data());
958
+ } else {
959
+ ARROW_LOG(WARNING) << buf.data();
960
+ }
961
+ }
962
+ return Status::OK();
963
+ }
964
+
965
+ Status ConvertMap(PandasOptions options, const ChunkedArray& data,
966
+ PyObject** out_values) {
967
+ // Get columns of underlying key/item arrays
968
+ std::vector<std::shared_ptr<Array>> key_arrays;
969
+ std::vector<std::shared_ptr<Array>> item_arrays;
970
+ for (int c = 0; c < data.num_chunks(); ++c) {
971
+ const auto& map_arr = checked_cast<const MapArray&>(*data.chunk(c));
972
+ key_arrays.emplace_back(map_arr.keys());
973
+ item_arrays.emplace_back(map_arr.items());
974
+ }
975
+
976
+ const auto& map_type = checked_cast<const MapType&>(*data.type());
977
+ auto key_type = map_type.key_type();
978
+ auto item_type = map_type.item_type();
979
+
980
+ // ARROW-6899: Convert dictionary-encoded children to dense instead of
981
+ // failing below. A more efficient conversion than this could be done later
982
+ if (key_type->id() == Type::DICTIONARY) {
983
+ auto dense_type = checked_cast<const DictionaryType&>(*key_type).value_type();
984
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &key_arrays));
985
+ key_type = dense_type;
986
+ }
987
+ if (item_type->id() == Type::DICTIONARY) {
988
+ auto dense_type = checked_cast<const DictionaryType&>(*item_type).value_type();
989
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &item_arrays));
990
+ item_type = dense_type;
991
+ }
992
+
993
+ // See notes in MakeInnerOptions.
994
+ options = MakeInnerOptions(std::move(options));
995
+ // Don't blindly convert because timestamps in lists are handled differently.
996
+ options.timestamp_as_object = true;
997
+
998
+ auto flat_keys = std::make_shared<ChunkedArray>(key_arrays, key_type);
999
+ auto flat_items = std::make_shared<ChunkedArray>(item_arrays, item_type);
1000
+ OwnedRefNoGIL owned_numpy_keys;
1001
+ RETURN_NOT_OK(
1002
+ ConvertChunkedArrayToPandas(options, flat_keys, nullptr, owned_numpy_keys.ref()));
1003
+ OwnedRefNoGIL owned_numpy_items;
1004
+ RETURN_NOT_OK(
1005
+ ConvertChunkedArrayToPandas(options, flat_items, nullptr, owned_numpy_items.ref()));
1006
+ PyArrayObject* py_keys = reinterpret_cast<PyArrayObject*>(owned_numpy_keys.obj());
1007
+ PyArrayObject* py_items = reinterpret_cast<PyArrayObject*>(owned_numpy_items.obj());
1008
+
1009
+ if (options.maps_as_pydicts == MapConversionType::DEFAULT) {
1010
+ // The default behavior is to express an Arrow MAP as a list of [(key, value), ...] pairs
1011
+ OwnedRef list_item;
1012
+ return ConvertMapHelper(
1013
+ [&list_item](int64_t num_pairs) {
1014
+ list_item.reset(PyList_New(num_pairs));
1015
+ return CheckPyError();
1016
+ },
1017
+ [&list_item](int64_t idx, OwnedRef& key_value, OwnedRef& item_value) {
1018
+ PyList_SET_ITEM(list_item.obj(), idx,
1019
+ PyTuple_Pack(2, key_value.obj(), item_value.obj()));
1020
+ return CheckPyError();
1021
+ },
1022
+ [&list_item] { return list_item.detach(); }, data, py_keys, py_items, item_arrays,
1023
+ out_values);
1024
+ } else {
1025
+ // Use a native pydict
1026
+ OwnedRef dict_item;
1027
+ Py_ssize_t total_dict_len{0};
1028
+ Py_ssize_t total_raw_len{0};
1029
+
1030
+ bool error_on_duplicate_keys;
1031
+ if (options.maps_as_pydicts == MapConversionType::LOSSY) {
1032
+ error_on_duplicate_keys = false;
1033
+ } else if (options.maps_as_pydicts == MapConversionType::STRICT_) {
1034
+ error_on_duplicate_keys = true;
1035
+ } else {
1036
+ auto val = std::underlying_type_t<MapConversionType>(options.maps_as_pydicts);
1037
+ return Status::UnknownError("Received unknown option for maps_as_pydicts: " +
1038
+ std::to_string(val));
1039
+ }
1040
+
1041
+ auto status = ConvertMapHelper(
1042
+ [&dict_item, &total_raw_len](int64_t num_pairs) {
1043
+ total_raw_len += num_pairs;
1044
+ dict_item.reset(PyDict_New());
1045
+ return CheckPyError();
1046
+ },
1047
+ [&dict_item]([[maybe_unused]] int64_t idx, OwnedRef& key_value,
1048
+ OwnedRef& item_value) {
1049
+ auto setitem_result =
1050
+ PyDict_SetItem(dict_item.obj(), key_value.obj(), item_value.obj());
1051
+ ARROW_RETURN_NOT_OK(CheckMapAsPydictsTypeError());
1052
+ // returns -1 if there are internal errors around hashing/resizing
1053
+ return setitem_result == 0 ? Status::OK()
1054
+ : Status::UnknownError(
1055
+ "[maps_as_pydicts] "
1056
+ "Unexpected failure inserting Arrow (key, "
1057
+ "value) pair into Python dict");
1058
+ },
1059
+ [&dict_item, &total_dict_len] {
1060
+ total_dict_len += PyDict_Size(dict_item.obj());
1061
+ return dict_item.detach();
1062
+ },
1063
+ data, py_keys, py_items, item_arrays, out_values);
1064
+
1065
+ ARROW_RETURN_NOT_OK(status);
1066
+ // If there were no errors generating the pydicts,
1067
+ // then check if we detected any data loss from duplicate keys.
1068
+ return CheckForDuplicateKeys(error_on_duplicate_keys, total_dict_len, total_raw_len);
1069
+ }
1070
+ }
1071
+
1072
+ template <typename InType, typename OutType>
1073
+ inline void ConvertNumericNullable(const ChunkedArray& data, InType na_value,
1074
+ OutType* out_values) {
1075
+ for (int c = 0; c < data.num_chunks(); c++) {
1076
+ const auto& arr = *data.chunk(c);
1077
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
1078
+
1079
+ if (arr.null_count() > 0) {
1080
+ for (int64_t i = 0; i < arr.length(); ++i) {
1081
+ *out_values++ = arr.IsNull(i) ? na_value : in_values[i];
1082
+ }
1083
+ } else {
1084
+ memcpy(out_values, in_values, sizeof(InType) * arr.length());
1085
+ out_values += arr.length();
1086
+ }
1087
+ }
1088
+ }
1089
+
1090
+ template <typename InType, typename OutType>
1091
+ inline void ConvertNumericNullableCast(const ChunkedArray& data, InType na_value,
1092
+ OutType* out_values) {
1093
+ for (int c = 0; c < data.num_chunks(); c++) {
1094
+ const auto& arr = *data.chunk(c);
1095
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
1096
+
1097
+ for (int64_t i = 0; i < arr.length(); ++i) {
1098
+ *out_values++ = arr.IsNull(i) ? static_cast<OutType>(na_value)
1099
+ : static_cast<OutType>(in_values[i]);
1100
+ }
1101
+ }
1102
+ }
1103
+
1104
+ template <int NPY_TYPE>
1105
+ class TypedPandasWriter : public PandasWriter {
1106
+ public:
1107
+ using T = typename npy_traits<NPY_TYPE>::value_type;
1108
+
1109
+ using PandasWriter::PandasWriter;
1110
+
1111
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
1112
+ if (CanZeroCopy(*data)) {
1113
+ PyObject* wrapped;
1114
+ npy_intp dims[2] = {static_cast<npy_intp>(num_columns_),
1115
+ static_cast<npy_intp>(num_rows_)};
1116
+ RETURN_NOT_OK(
1117
+ MakeNumPyView(data->chunk(0), py_ref, NPY_TYPE, /*ndim=*/2, dims, &wrapped));
1118
+ SetBlockData(wrapped);
1119
+ return Status::OK();
1120
+ } else {
1121
+ RETURN_NOT_OK(CheckNotZeroCopyOnly(*data));
1122
+ RETURN_NOT_OK(EnsureAllocated());
1123
+ return CopyInto(data, /*rel_placement=*/0);
1124
+ }
1125
+ }
1126
+
1127
+ Status CheckTypeExact(const DataType& type, Type::type expected) {
1128
+ if (type.id() != expected) {
1129
+ // TODO(wesm): stringify NumPy / pandas type
1130
+ return Status::NotImplemented("Cannot write Arrow data of type ", type.ToString());
1131
+ }
1132
+ return Status::OK();
1133
+ }
1134
+
1135
+ T* GetBlockColumnStart(int64_t rel_placement) {
1136
+ return reinterpret_cast<T*>(block_data_) + rel_placement * num_rows_;
1137
+ }
1138
+
1139
+ protected:
1140
+ Status Allocate() override { return AllocateNDArray(NPY_TYPE); }
1141
+ };
1142
+
1143
+ struct ObjectWriterVisitor {
1144
+ const PandasOptions& options;
1145
+ const ChunkedArray& data;
1146
+ PyObject** out_values;
1147
+
1148
+ Status Visit(const NullType& type) {
1149
+ for (int c = 0; c < data.num_chunks(); c++) {
1150
+ std::shared_ptr<Array> arr = data.chunk(c);
1151
+
1152
+ for (int64_t i = 0; i < arr->length(); ++i) {
1153
+ // All values are null
1154
+ Py_INCREF(Py_None);
1155
+ *out_values = Py_None;
1156
+ ++out_values;
1157
+ }
1158
+ }
1159
+ return Status::OK();
1160
+ }
1161
+
1162
+ Status Visit(const BooleanType& type) {
1163
+ for (int c = 0; c < data.num_chunks(); c++) {
1164
+ const auto& arr = checked_cast<const BooleanArray&>(*data.chunk(c));
1165
+
1166
+ for (int64_t i = 0; i < arr.length(); ++i) {
1167
+ if (arr.IsNull(i)) {
1168
+ Py_INCREF(Py_None);
1169
+ *out_values++ = Py_None;
1170
+ } else if (arr.Value(i)) {
1171
+ // True
1172
+ Py_INCREF(Py_True);
1173
+ *out_values++ = Py_True;
1174
+ } else {
1175
+ // False
1176
+ Py_INCREF(Py_False);
1177
+ *out_values++ = Py_False;
1178
+ }
1179
+ }
1180
+ }
1181
+ return Status::OK();
1182
+ }
1183
+
1184
+ template <typename Type>
1185
+ enable_if_integer<Type, Status> Visit(const Type& type) {
1186
+ using T = typename Type::c_type;
1187
+ auto WrapValue = [](T value, PyObject** out) {
1188
+ *out = std::is_signed<T>::value ? PyLong_FromLongLong(value)
1189
+ : PyLong_FromUnsignedLongLong(value);
1190
+ RETURN_IF_PYERROR();
1191
+ return Status::OK();
1192
+ };
1193
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
1194
+ }
1195
+
1196
+ template <typename Type>
1197
+ enable_if_t<is_base_binary_type<Type>::value || is_binary_view_like_type<Type>::value ||
1198
+ is_fixed_size_binary_type<Type>::value,
1199
+ Status>
1200
+ Visit(const Type& type) {
1201
+ auto WrapValue = [](const std::string_view& view, PyObject** out) {
1202
+ *out = WrapBytes<Type>::Wrap(view.data(), view.length());
1203
+ if (*out == nullptr) {
1204
+ PyErr_Clear();
1205
+ return Status::UnknownError("Wrapping ", view, " failed");
1206
+ }
1207
+ return Status::OK();
1208
+ };
1209
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
1210
+ }
1211
+
1212
+ template <typename Type>
1213
+ enable_if_date<Type, Status> Visit(const Type& type) {
1214
+ auto WrapValue = [](typename Type::c_type value, PyObject** out) {
1215
+ RETURN_NOT_OK(internal::PyDate_from_int(value, Type::UNIT, out));
1216
+ RETURN_IF_PYERROR();
1217
+ return Status::OK();
1218
+ };
1219
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
1220
+ }
1221
+
1222
+ template <typename Type>
1223
+ enable_if_time<Type, Status> Visit(const Type& type) {
1224
+ const TimeUnit::type unit = type.unit();
1225
+ auto WrapValue = [unit](typename Type::c_type value, PyObject** out) {
1226
+ RETURN_NOT_OK(internal::PyTime_from_int(value, unit, out));
1227
+ RETURN_IF_PYERROR();
1228
+ return Status::OK();
1229
+ };
1230
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
1231
+ }
1232
+
1233
+ template <typename Type>
1234
+ enable_if_timestamp<Type, Status> Visit(const Type& type) {
1235
+ const TimeUnit::type unit = type.unit();
1236
+ OwnedRef tzinfo;
1237
+
1238
+ auto ConvertTimezoneNaive = [&](typename Type::c_type value, PyObject** out) {
1239
+ RETURN_NOT_OK(internal::PyDateTime_from_int(value, unit, out));
1240
+ RETURN_IF_PYERROR();
1241
+ return Status::OK();
1242
+ };
1243
+ auto ConvertTimezoneAware = [&](typename Type::c_type value, PyObject** out) {
1244
+ PyObject* naive_datetime;
1245
+ RETURN_NOT_OK(ConvertTimezoneNaive(value, &naive_datetime));
1246
+
1247
+ // convert the timezone naive datetime object to timezone aware
1248
+ // two step conversion of the datetime mimics Python's code:
1249
+ // dt.replace(tzinfo=datetime.timezone.utc).astimezone(tzinfo)
1250
+ // first step: replacing timezone with timezone.utc (replace method)
1251
+ OwnedRef args(PyTuple_New(0));
1252
+ OwnedRef keywords(PyDict_New());
1253
+ PyDict_SetItemString(keywords.obj(), "tzinfo", PyDateTime_TimeZone_UTC);
1254
+ OwnedRef naive_datetime_replace(PyObject_GetAttrString(naive_datetime, "replace"));
1255
+ OwnedRef datetime_utc(
1256
+ PyObject_Call(naive_datetime_replace.obj(), args.obj(), keywords.obj()));
1257
+ // second step: adjust the datetime to tzinfo timezone (astimezone method)
1258
+ *out = PyObject_CallMethod(datetime_utc.obj(), "astimezone", "O", tzinfo.obj());
1259
+
1260
+ // the timezone naive object is no longer required
1261
+ Py_DECREF(naive_datetime);
1262
+ RETURN_IF_PYERROR();
1263
+
1264
+ return Status::OK();
1265
+ };
1266
+
1267
+ if (!type.timezone().empty() && !options.ignore_timezone) {
1268
+ // convert timezone aware
1269
+ PyObject* tzobj;
1270
+ ARROW_ASSIGN_OR_RAISE(tzobj, internal::StringToTzinfo(type.timezone()));
1271
+ tzinfo.reset(tzobj);
1272
+ RETURN_IF_PYERROR();
1273
+ RETURN_NOT_OK(
1274
+ ConvertAsPyObjects<Type>(options, data, ConvertTimezoneAware, out_values));
1275
+ } else {
1276
+ // convert timezone naive
1277
+ RETURN_NOT_OK(
1278
+ ConvertAsPyObjects<Type>(options, data, ConvertTimezoneNaive, out_values));
1279
+ }
1280
+
1281
+ return Status::OK();
1282
+ }
1283
+
1284
+ template <typename Type>
1285
+ enable_if_t<std::is_same<Type, MonthDayNanoIntervalType>::value, Status> Visit(
1286
+ const Type& type) {
1287
+ OwnedRef args(PyTuple_New(0));
1288
+ OwnedRef kwargs(PyDict_New());
1289
+ RETURN_IF_PYERROR();
1290
+ auto to_date_offset = [&](const MonthDayNanoIntervalType::MonthDayNanos& interval,
1291
+ PyObject** out) {
1292
+ DCHECK(internal::BorrowPandasDataOffsetType() != nullptr);
1293
+ // DateOffset objects do not add nanoseconds component to pd.Timestamp.
1294
+ // as of Pandas 1.3.3
1295
+ // (https://github.com/pandas-dev/pandas/issues/43892).
1296
+ // So convert microseconds and remainder to preserve data
1297
+ // but give users more expected results.
1298
+ int64_t microseconds = interval.nanoseconds / 1000;
1299
+ int64_t nanoseconds;
1300
+ if (interval.nanoseconds >= 0) {
1301
+ nanoseconds = interval.nanoseconds % 1000;
1302
+ } else {
1303
+ nanoseconds = -((-interval.nanoseconds) % 1000);
1304
+ }
1305
+
1306
+ PyDict_SetItemString(kwargs.obj(), "months", PyLong_FromLong(interval.months));
1307
+ PyDict_SetItemString(kwargs.obj(), "days", PyLong_FromLong(interval.days));
1308
+ PyDict_SetItemString(kwargs.obj(), "microseconds",
1309
+ PyLong_FromLongLong(microseconds));
1310
+ PyDict_SetItemString(kwargs.obj(), "nanoseconds", PyLong_FromLongLong(nanoseconds));
1311
+ *out =
1312
+ PyObject_Call(internal::BorrowPandasDataOffsetType(), args.obj(), kwargs.obj());
1313
+ RETURN_IF_PYERROR();
1314
+ return Status::OK();
1315
+ };
1316
+ return ConvertAsPyObjects<MonthDayNanoIntervalType>(options, data, to_date_offset,
1317
+ out_values);
1318
+ }
1319
+
1320
+ Status Visit(const Decimal128Type& type) {
1321
+ OwnedRef decimal;
1322
+ OwnedRef Decimal;
1323
+ RETURN_NOT_OK(internal::ImportModule("decimal", &decimal));
1324
+ RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal));
1325
+ PyObject* decimal_constructor = Decimal.obj();
1326
+
1327
+ for (int c = 0; c < data.num_chunks(); c++) {
1328
+ const auto& arr = checked_cast<const arrow::Decimal128Array&>(*data.chunk(c));
1329
+
1330
+ for (int64_t i = 0; i < arr.length(); ++i) {
1331
+ if (arr.IsNull(i)) {
1332
+ Py_INCREF(Py_None);
1333
+ *out_values++ = Py_None;
1334
+ } else {
1335
+ *out_values++ =
1336
+ internal::DecimalFromString(decimal_constructor, arr.FormatValue(i));
1337
+ RETURN_IF_PYERROR();
1338
+ }
1339
+ }
1340
+ }
1341
+
1342
+ return Status::OK();
1343
+ }
1344
+
1345
+ Status Visit(const Decimal256Type& type) {
1346
+ OwnedRef decimal;
1347
+ OwnedRef Decimal;
1348
+ RETURN_NOT_OK(internal::ImportModule("decimal", &decimal));
1349
+ RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal));
1350
+ PyObject* decimal_constructor = Decimal.obj();
1351
+
1352
+ for (int c = 0; c < data.num_chunks(); c++) {
1353
+ const auto& arr = checked_cast<const arrow::Decimal256Array&>(*data.chunk(c));
1354
+
1355
+ for (int64_t i = 0; i < arr.length(); ++i) {
1356
+ if (arr.IsNull(i)) {
1357
+ Py_INCREF(Py_None);
1358
+ *out_values++ = Py_None;
1359
+ } else {
1360
+ *out_values++ =
1361
+ internal::DecimalFromString(decimal_constructor, arr.FormatValue(i));
1362
+ RETURN_IF_PYERROR();
1363
+ }
1364
+ }
1365
+ }
1366
+
1367
+ return Status::OK();
1368
+ }
1369
+
1370
+ template <typename T>
1371
+ enable_if_t<is_list_like_type<T>::value || is_list_view_type<T>::value, Status> Visit(
1372
+ const T& type) {
1373
+ if (!ListTypeSupported(*type.value_type())) {
1374
+ return Status::NotImplemented(
1375
+ "Not implemented type for conversion from List to Pandas: ",
1376
+ type.value_type()->ToString());
1377
+ }
1378
+ return ConvertListsLike<T>(options, data, out_values);
1379
+ }
1380
+
1381
+ Status Visit(const MapType& type) { return ConvertMap(options, data, out_values); }
1382
+
1383
+ Status Visit(const StructType& type) {
1384
+ return ConvertStruct(options, data, out_values);
1385
+ }
1386
+
1387
+ template <typename Type>
1388
+ enable_if_t<is_floating_type<Type>::value ||
1389
+ std::is_same<DictionaryType, Type>::value ||
1390
+ std::is_same<DurationType, Type>::value ||
1391
+ std::is_same<RunEndEncodedType, Type>::value ||
1392
+ std::is_same<ExtensionType, Type>::value ||
1393
+ (std::is_base_of<IntervalType, Type>::value &&
1394
+ !std::is_same<MonthDayNanoIntervalType, Type>::value) ||
1395
+ std::is_base_of<UnionType, Type>::value,
1396
+ Status>
1397
+ Visit(const Type& type) {
1398
+ return Status::NotImplemented("No implemented conversion to object dtype: ",
1399
+ type.ToString());
1400
+ }
1401
+ };
1402
+
1403
+ class ObjectWriter : public TypedPandasWriter<NPY_OBJECT> {
1404
+ public:
1405
+ using TypedPandasWriter<NPY_OBJECT>::TypedPandasWriter;
1406
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1407
+ PyAcquireGIL lock;
1408
+ ObjectWriterVisitor visitor{this->options_, *data,
1409
+ this->GetBlockColumnStart(rel_placement)};
1410
+ return VisitTypeInline(*data->type(), &visitor);
1411
+ }
1412
+ };
1413
+
1414
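+ // Note (added comment): a single chunk with no nulls is a candidate for zero-copy wrapping.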
+ static inline bool IsNonNullContiguous(const ChunkedArray& data) {
1415
+ return data.num_chunks() == 1 && data.null_count() == 0;
1416
+ }
1417
+
1418
+ template <int NPY_TYPE>
1419
+ class IntWriter : public TypedPandasWriter<NPY_TYPE> {
1420
+ public:
1421
+ using ArrowType = typename npy_traits<NPY_TYPE>::TypeClass;
1422
+ using TypedPandasWriter<NPY_TYPE>::TypedPandasWriter;
1423
+
1424
+ bool CanZeroCopy(const ChunkedArray& data) const override {
1425
+ return IsNonNullContiguous(data);
1426
+ }
1427
+
1428
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1429
+ RETURN_NOT_OK(this->CheckTypeExact(*data->type(), ArrowType::type_id));
1430
+ ConvertIntegerNoNullsSameType<typename ArrowType::c_type>(
1431
+ this->options_, *data, this->GetBlockColumnStart(rel_placement));
1432
+ return Status::OK();
1433
+ }
1434
+ };
1435
+
1436
+ template <int NPY_TYPE>
1437
+ class FloatWriter : public TypedPandasWriter<NPY_TYPE> {
1438
+ public:
1439
+ using ArrowType = typename npy_traits<NPY_TYPE>::TypeClass;
1440
+ using TypedPandasWriter<NPY_TYPE>::TypedPandasWriter;
1441
+ using T = typename ArrowType::c_type;
1442
+
1443
+ bool CanZeroCopy(const ChunkedArray& data) const override {
1444
+ return IsNonNullContiguous(data) && data.type()->id() == ArrowType::type_id;
1445
+ }
1446
+
1447
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1448
+ Type::type in_type = data->type()->id();
1449
+ auto out_values = this->GetBlockColumnStart(rel_placement);
1450
+
1451
+ #define INTEGER_CASE(IN_TYPE) \
1452
+ ConvertIntegerWithNulls<IN_TYPE, T>(this->options_, *data, out_values); \
1453
+ break;
1454
+
1455
+ switch (in_type) {
1456
+ case Type::UINT8:
1457
+ INTEGER_CASE(uint8_t);
1458
+ case Type::INT8:
1459
+ INTEGER_CASE(int8_t);
1460
+ case Type::UINT16:
1461
+ INTEGER_CASE(uint16_t);
1462
+ case Type::INT16:
1463
+ INTEGER_CASE(int16_t);
1464
+ case Type::UINT32:
1465
+ INTEGER_CASE(uint32_t);
1466
+ case Type::INT32:
1467
+ INTEGER_CASE(int32_t);
1468
+ case Type::UINT64:
1469
+ INTEGER_CASE(uint64_t);
1470
+ case Type::INT64:
1471
+ INTEGER_CASE(int64_t);
1472
+ case Type::HALF_FLOAT:
1473
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
+ break;
1474
+ case Type::FLOAT:
1475
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
1476
+ break;
1477
+ case Type::DOUBLE:
1478
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
1479
+ break;
1480
+ default:
1481
+ return Status::NotImplemented("Cannot write Arrow data of type ",
1482
+ data->type()->ToString(),
1483
+ " to a Pandas floating point block");
1484
+ }
1485
+
1486
+ #undef INTEGER_CASE
1487
+
1488
+ return Status::OK();
1489
+ }
1490
+ };
1491
+
1492
+ using UInt8Writer = IntWriter<NPY_UINT8>;
1493
+ using Int8Writer = IntWriter<NPY_INT8>;
1494
+ using UInt16Writer = IntWriter<NPY_UINT16>;
1495
+ using Int16Writer = IntWriter<NPY_INT16>;
1496
+ using UInt32Writer = IntWriter<NPY_UINT32>;
1497
+ using Int32Writer = IntWriter<NPY_INT32>;
1498
+ using UInt64Writer = IntWriter<NPY_UINT64>;
1499
+ using Int64Writer = IntWriter<NPY_INT64>;
1500
+ using Float16Writer = FloatWriter<NPY_FLOAT16>;
1501
+ using Float32Writer = FloatWriter<NPY_FLOAT32>;
1502
+ using Float64Writer = FloatWriter<NPY_FLOAT64>;
1503
+
1504
+ class BoolWriter : public TypedPandasWriter<NPY_BOOL> {
1505
+ public:
1506
+ using TypedPandasWriter<NPY_BOOL>::TypedPandasWriter;
1507
+
1508
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
1509
+ RETURN_NOT_OK(
1510
+ CheckNoZeroCopy("Zero copy conversions not possible with "
1511
+ "boolean types"));
1512
+ RETURN_NOT_OK(EnsureAllocated());
1513
+ return CopyInto(data, /*rel_placement=*/0);
1514
+ }
1515
+
1516
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1517
+ RETURN_NOT_OK(this->CheckTypeExact(*data->type(), Type::BOOL));
1518
+ auto out_values = this->GetBlockColumnStart(rel_placement);
1519
+ for (int c = 0; c < data->num_chunks(); c++) {
1520
+ const auto& arr = checked_cast<const BooleanArray&>(*data->chunk(c));
1521
+ for (int64_t i = 0; i < arr.length(); ++i) {
1522
+ *out_values++ = static_cast<uint8_t>(arr.Value(i));
1523
+ }
1524
+ }
1525
+ return Status::OK();
1526
+ }
1527
+ };
1528
+
1529
+ // ----------------------------------------------------------------------
1530
+ // Date / timestamp types
1531
+
1532
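+ // Note (added comment): multiplies raw values by SHIFT to reach the target
+ // datetime64 resolution; nulls map to kPandasTimestampNull.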
+ template <typename T, int64_t SHIFT>
1533
+ inline void ConvertDatetime(const ChunkedArray& data, int64_t* out_values) {
1534
+ for (int c = 0; c < data.num_chunks(); c++) {
1535
+ const auto& arr = *data.chunk(c);
1536
+ const T* in_values = GetPrimitiveValues<T>(arr);
1537
+
1538
+ for (int64_t i = 0; i < arr.length(); ++i) {
1539
+ *out_values++ = arr.IsNull(i) ? kPandasTimestampNull
1540
+ : (static_cast<int64_t>(in_values[i]) * SHIFT);
1541
+ }
1542
+ }
1543
+ }
1544
+
1545
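+ // Note (added comment): divides raw values by SHIFT (e.g. milliseconds per day)
+ // so date32/date64 both land on day resolution; nulls map to kPandasTimestampNull.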
+ template <typename T, int SHIFT>
1546
+ void ConvertDatesShift(const ChunkedArray& data, int64_t* out_values) {
1547
+ for (int c = 0; c < data.num_chunks(); c++) {
1548
+ const auto& arr = *data.chunk(c);
1549
+ const T* in_values = GetPrimitiveValues<T>(arr);
1550
+ for (int64_t i = 0; i < arr.length(); ++i) {
1551
+ *out_values++ = arr.IsNull(i) ? kPandasTimestampNull
1552
+ : static_cast<int64_t>(in_values[i]) / SHIFT;
1553
+ }
1554
+ }
1555
+ }
1556
+
1557
+ class DatetimeDayWriter : public TypedPandasWriter<NPY_DATETIME> {
1558
+ public:
1559
+ using TypedPandasWriter<NPY_DATETIME>::TypedPandasWriter;
1560
+
1561
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1562
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
1563
+ const auto& type = checked_cast<const DateType&>(*data->type());
1564
+ switch (type.unit()) {
1565
+ case DateUnit::DAY:
1566
+ ConvertDatesShift<int32_t, 1LL>(*data, out_values);
1567
+ break;
1568
+ case DateUnit::MILLI:
1569
+ ConvertDatesShift<int64_t, 86400000LL>(*data, out_values);
1570
+ break;
1571
+ }
1572
+ return Status::OK();
1573
+ }
1574
+
1575
+ protected:
1576
+ Status Allocate() override {
1577
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
1578
+ SetDatetimeUnit(NPY_FR_D);
1579
+ return Status::OK();
1580
+ }
1581
+ };
1582
+
1583
+ template <TimeUnit::type UNIT>
1584
+ class DatetimeWriter : public TypedPandasWriter<NPY_DATETIME> {
1585
+ public:
1586
+ using TypedPandasWriter<NPY_DATETIME>::TypedPandasWriter;
1587
+
1588
+ bool CanZeroCopy(const ChunkedArray& data) const override {
1589
+ if (data.type()->id() == Type::TIMESTAMP) {
1590
+ const auto& type = checked_cast<const TimestampType&>(*data.type());
1591
+ return IsNonNullContiguous(data) && type.unit() == UNIT;
1592
+ } else {
1593
+ return false;
1594
+ }
1595
+ }
1596
+
1597
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1598
+ const auto& ts_type = checked_cast<const TimestampType&>(*data->type());
1599
+ DCHECK_EQ(UNIT, ts_type.unit()) << "Should only call instances of this writer "
1600
+ << "with arrays of the correct unit";
1601
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull,
1602
+ this->GetBlockColumnStart(rel_placement));
1603
+ return Status::OK();
1604
+ }
1605
+
1606
+ protected:
1607
+ Status Allocate() override {
1608
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
1609
+ SetDatetimeUnit(internal::NumPyFrequency(UNIT));
1610
+ return Status::OK();
1611
+ }
1612
+ };
1613
+
1614
+ using DatetimeSecondWriter = DatetimeWriter<TimeUnit::SECOND>;
1615
+
1616
+ class DatetimeMilliWriter : public DatetimeWriter<TimeUnit::MILLI> {
1617
+ public:
1618
+ using DatetimeWriter<TimeUnit::MILLI>::DatetimeWriter;
1619
+
1620
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1621
+ Type::type type = data->type()->id();
1622
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
1623
+ if (type == Type::DATE32) {
1624
+ // Convert from days since epoch to datetime64[ms]
1625
+ ConvertDatetime<int32_t, 86400000L>(*data, out_values);
1626
+ } else if (type == Type::DATE64) {
1627
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
1628
+ } else {
1629
+ const auto& ts_type = checked_cast<const TimestampType&>(*data->type());
1630
+ DCHECK_EQ(TimeUnit::MILLI, ts_type.unit())
1631
+ << "Should only call instances of this writer "
1632
+ << "with arrays of the correct unit";
1633
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
1634
+ }
1635
+ return Status::OK();
1636
+ }
1637
+ };
1638
+
1639
+ using DatetimeMicroWriter = DatetimeWriter<TimeUnit::MICRO>;
1640
+
1641
+ class DatetimeNanoWriter : public DatetimeWriter<TimeUnit::NANO> {
1642
+ public:
1643
+ using DatetimeWriter<TimeUnit::NANO>::DatetimeWriter;
1644
+
1645
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1646
+ Type::type type = data->type()->id();
1647
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
1648
+ compute::ExecContext ctx(options_.pool);
1649
+ compute::CastOptions options;
1650
+ if (options_.safe_cast) {
1651
+ options = compute::CastOptions::Safe();
1652
+ } else {
1653
+ options = compute::CastOptions::Unsafe();
1654
+ }
1655
+ Datum out;
1656
+ auto target_type = timestamp(TimeUnit::NANO);
1657
+
1658
+ if (type == Type::DATE32) {
1659
+ // Convert from days since epoch to datetime64[ns]
1660
+ ConvertDatetime<int32_t, kNanosecondsInDay>(*data, out_values);
1661
+ } else if (type == Type::DATE64) {
1662
+ // Date64Type is millisecond timestamp stored as int64_t
1663
+ // TODO(wesm): Do we want to make sure to zero out the milliseconds?
1664
+ ConvertDatetime<int64_t, 1000000L>(*data, out_values);
1665
+ } else if (type == Type::TIMESTAMP) {
1666
+ const auto& ts_type = checked_cast<const TimestampType&>(*data->type());
1667
+
1668
+ if (ts_type.unit() == TimeUnit::NANO) {
1669
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
1670
+ } else if (ts_type.unit() == TimeUnit::MICRO || ts_type.unit() == TimeUnit::MILLI ||
1671
+ ts_type.unit() == TimeUnit::SECOND) {
1672
+ ARROW_ASSIGN_OR_RAISE(out, compute::Cast(data, target_type, options, &ctx));
1673
+ ConvertNumericNullable<int64_t>(*out.chunked_array(), kPandasTimestampNull,
1674
+ out_values);
1675
+ } else {
1676
+ return Status::NotImplemented("Unsupported time unit");
1677
+ }
1678
+ } else {
1679
+ return Status::NotImplemented("Cannot write Arrow data of type ",
1680
+ data->type()->ToString(),
1681
+ " to a Pandas datetime block.");
1682
+ }
1683
+ return Status::OK();
1684
+ }
1685
+ };
1686
+
1687
+ template <typename BASE>
1688
+ class DatetimeTZWriter : public BASE {
1689
+ public:
1690
+ DatetimeTZWriter(const PandasOptions& options, const std::string& timezone,
1691
+ int64_t num_rows)
1692
+ : BASE(options, num_rows, 1), timezone_(timezone) {}
1693
+
1694
+ protected:
1695
+ Status GetResultBlock(PyObject** out) override {
1696
+ RETURN_NOT_OK(this->MakeBlock1D());
1697
+ *out = this->block_arr_.obj();
1698
+ return Status::OK();
1699
+ }
1700
+
1701
+ Status AddResultMetadata(PyObject* result) override {
1702
+ PyObject* py_tz = PyUnicode_FromStringAndSize(
1703
+ timezone_.c_str(), static_cast<Py_ssize_t>(timezone_.size()));
1704
+ RETURN_IF_PYERROR();
1705
+ PyDict_SetItemString(result, "timezone", py_tz);
1706
+ Py_DECREF(py_tz);
1707
+ return Status::OK();
1708
+ }
1709
+
1710
+ private:
1711
+ std::string timezone_;
1712
+ };
1713
+
1714
+ using DatetimeSecondTZWriter = DatetimeTZWriter<DatetimeSecondWriter>;
1715
+ using DatetimeMilliTZWriter = DatetimeTZWriter<DatetimeMilliWriter>;
1716
+ using DatetimeMicroTZWriter = DatetimeTZWriter<DatetimeMicroWriter>;
1717
+ using DatetimeNanoTZWriter = DatetimeTZWriter<DatetimeNanoWriter>;
1718
+
1719
+ template <TimeUnit::type UNIT>
1720
+ class TimedeltaWriter : public TypedPandasWriter<NPY_TIMEDELTA> {
1721
+ public:
1722
+ using TypedPandasWriter<NPY_TIMEDELTA>::TypedPandasWriter;
1723
+
1724
+ Status AllocateTimedelta(int ndim) {
1725
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_TIMEDELTA, ndim));
1726
+ SetDatetimeUnit(internal::NumPyFrequency(UNIT));
1727
+ return Status::OK();
1728
+ }
1729
+
1730
+ bool CanZeroCopy(const ChunkedArray& data) const override {
1731
+ const auto& type = checked_cast<const DurationType&>(*data.type());
1732
+ return IsNonNullContiguous(data) && type.unit() == UNIT;
1733
+ }
1734
+
1735
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1736
+ const auto& type = checked_cast<const DurationType&>(*data->type());
1737
+ DCHECK_EQ(UNIT, type.unit()) << "Should only call instances of this writer "
1738
+ << "with arrays of the correct unit";
1739
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull,
1740
+ this->GetBlockColumnStart(rel_placement));
1741
+ return Status::OK();
1742
+ }
1743
+
1744
+ protected:
1745
+ Status Allocate() override { return AllocateTimedelta(2); }
1746
+ };
1747
+
1748
+ using TimedeltaSecondWriter = TimedeltaWriter<TimeUnit::SECOND>;
1749
+ using TimedeltaMilliWriter = TimedeltaWriter<TimeUnit::MILLI>;
1750
+ using TimedeltaMicroWriter = TimedeltaWriter<TimeUnit::MICRO>;
1751
+
1752
+ class TimedeltaNanoWriter : public TimedeltaWriter<TimeUnit::NANO> {
1753
+ public:
1754
+ using TimedeltaWriter<TimeUnit::NANO>::TimedeltaWriter;
1755
+
1756
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1757
+ Type::type type = data->type()->id();
1758
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
1759
+ if (type == Type::DURATION) {
1760
+ const auto& ts_type = checked_cast<const DurationType&>(*data->type());
1761
+ if (ts_type.unit() == TimeUnit::NANO) {
1762
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
1763
+ } else if (ts_type.unit() == TimeUnit::MICRO) {
1764
+ ConvertDatetime<int64_t, 1000L>(*data, out_values);
1765
+ } else if (ts_type.unit() == TimeUnit::MILLI) {
1766
+ ConvertDatetime<int64_t, 1000000L>(*data, out_values);
1767
+ } else if (ts_type.unit() == TimeUnit::SECOND) {
1768
+ ConvertDatetime<int64_t, 1000000000L>(*data, out_values);
1769
+ } else {
1770
+ return Status::NotImplemented("Unsupported time unit");
1771
+ }
1772
+ } else {
1773
+ return Status::NotImplemented("Cannot write Arrow data of type ",
1774
+ data->type()->ToString(),
1775
+ " to a Pandas timedelta block.");
1776
+ }
1777
+ return Status::OK();
1778
+ }
1779
+ };
1780
+
1781
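+ // Note (added comment): builds an empty array of the given type; used when a
+ // dictionary column has no chunks.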
+ Status MakeZeroLengthArray(const std::shared_ptr<DataType>& type,
1782
+ std::shared_ptr<Array>* out) {
1783
+ std::unique_ptr<ArrayBuilder> builder;
1784
+ RETURN_NOT_OK(MakeBuilder(default_memory_pool(), type, &builder));
1785
+ RETURN_NOT_OK(builder->Resize(0));
1786
+ return builder->Finish(out);
1787
+ }
1788
+
1789
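+ // Note (added comment): chunks whose dictionaries differ must be unified before
+ // building a single pandas Categorical.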
+ bool NeedDictionaryUnification(const ChunkedArray& data) {
1790
+ if (data.num_chunks() < 2) {
1791
+ return false;
1792
+ }
1793
+ const auto& arr_first = checked_cast<const DictionaryArray&>(*data.chunk(0));
1794
+ for (int c = 1; c < data.num_chunks(); c++) {
1795
+ const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
1796
+ if (!(arr_first.dictionary()->Equals(arr.dictionary()))) {
1797
+ return true;
1798
+ }
1799
+ }
1800
+ return false;
1801
+ }
1802
+
1803
+ template <typename IndexType>
1804
+ class CategoricalWriter
1805
+ : public TypedPandasWriter<arrow_traits<IndexType::type_id>::npy_type> {
1806
+ public:
1807
+ using TRAITS = arrow_traits<IndexType::type_id>;
1808
+ using ArrayType = typename TypeTraits<IndexType>::ArrayType;
1809
+ using T = typename TRAITS::T;
1810
+
1811
+ explicit CategoricalWriter(const PandasOptions& options, int64_t num_rows)
1812
+ : TypedPandasWriter<TRAITS::npy_type>(options, num_rows, 1),
1813
+ ordered_(false),
1814
+ needs_copy_(false) {}
1815
+
1816
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1817
+ return Status::NotImplemented("categorical type");
1818
+ }
1819
+
1820
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
1821
+ const auto& dict_type = checked_cast<const DictionaryType&>(*data->type());
1822
+ std::shared_ptr<Array> dict;
1823
+ if (data->num_chunks() == 0) {
1824
+ // no dictionary values => create empty array
1825
+ RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1));
1826
+ RETURN_NOT_OK(MakeZeroLengthArray(dict_type.value_type(), &dict));
1827
+ } else {
1828
+ DCHECK_EQ(IndexType::type_id, dict_type.index_type()->id());
1829
+ RETURN_NOT_OK(WriteIndices(*data, &dict));
1830
+ }
1831
+
1832
+ PyObject* pydict;
1833
+ RETURN_NOT_OK(ConvertArrayToPandas(this->options_, dict, nullptr, &pydict));
1834
+ dictionary_.reset(pydict);
1835
+ ordered_ = dict_type.ordered();
1836
+ return Status::OK();
1837
+ }
1838
+
1839
+ Status Write(std::shared_ptr<ChunkedArray> data, int64_t abs_placement,
1840
+ int64_t rel_placement) override {
1841
+ RETURN_NOT_OK(this->EnsurePlacementAllocated());
1842
+ RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr));
1843
+ this->placement_data_[rel_placement] = abs_placement;
1844
+ return Status::OK();
1845
+ }
1846
+
1847
+ Status GetSeriesResult(PyObject** out) override {
1848
+ PyAcquireGIL lock;
1849
+
1850
+ PyObject* result = PyDict_New();
1851
+ RETURN_IF_PYERROR();
1852
+
1853
+ // Expected single array dictionary layout
1854
+ PyDict_SetItemString(result, "indices", this->block_arr_.obj());
1855
+ RETURN_IF_PYERROR();
1856
+ RETURN_NOT_OK(AddResultMetadata(result));
1857
+
1858
+ *out = result;
1859
+ return Status::OK();
1860
+ }
1861
+
1862
+ protected:
1863
+ Status AddResultMetadata(PyObject* result) override {
1864
+ PyDict_SetItemString(result, "dictionary", dictionary_.obj());
1865
+ PyObject* py_ordered = ordered_ ? Py_True : Py_False;
1866
+ Py_INCREF(py_ordered);
1867
+ PyDict_SetItemString(result, "ordered", py_ordered);
1868
+ return Status::OK();
1869
+ }
1870
+
1871
+ Status WriteIndicesUniform(const ChunkedArray& data) {
1872
+ RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1));
1873
+ T* out_values = reinterpret_cast<T*>(this->block_data_);
1874
+
1875
+ for (int c = 0; c < data.num_chunks(); c++) {
1876
+ const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
1877
+ const auto& indices = checked_cast<const ArrayType&>(*arr.indices());
1878
+ auto values = reinterpret_cast<const T*>(indices.raw_values());
1879
+
1880
+ RETURN_NOT_OK(CheckIndexBounds(*indices.data(), arr.dictionary()->length()));
1881
+ // Null is -1 in CategoricalBlock
1882
+ for (int i = 0; i < arr.length(); ++i) {
1883
+ if (indices.IsValid(i)) {
1884
+ *out_values++ = values[i];
1885
+ } else {
1886
+ *out_values++ = -1;
1887
+ }
1888
+ }
1889
+ }
1890
+ return Status::OK();
1891
+ }
1892
+
1893
+ Status WriteIndicesVarying(const ChunkedArray& data, std::shared_ptr<Array>* out_dict) {
1894
+ // Yield int32 indices to allow for dictionary outgrowing the current index
1895
+ // type
1896
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_INT32, 1));
1897
+ auto out_values = reinterpret_cast<int32_t*>(this->block_data_);
1898
+
1899
+ const auto& dict_type = checked_cast<const DictionaryType&>(*data.type());
1900
+
1901
+ ARROW_ASSIGN_OR_RAISE(auto unifier, DictionaryUnifier::Make(dict_type.value_type(),
1902
+ this->options_.pool));
1903
+ for (int c = 0; c < data.num_chunks(); c++) {
1904
+ const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
1905
+ const auto& indices = checked_cast<const ArrayType&>(*arr.indices());
1906
+ auto values = reinterpret_cast<const T*>(indices.raw_values());
1907
+
1908
+ std::shared_ptr<Buffer> transpose_buffer;
1909
+ RETURN_NOT_OK(unifier->Unify(*arr.dictionary(), &transpose_buffer));
1910
+
1911
+ auto transpose = reinterpret_cast<const int32_t*>(transpose_buffer->data());
1912
+ int64_t dict_length = arr.dictionary()->length();
1913
+
1914
+ RETURN_NOT_OK(CheckIndexBounds(*indices.data(), dict_length));
1915
+
1916
+ // Null is -1 in CategoricalBlock
1917
+ for (int i = 0; i < arr.length(); ++i) {
1918
+ if (indices.IsValid(i)) {
1919
+ *out_values++ = transpose[values[i]];
1920
+ } else {
1921
+ *out_values++ = -1;
1922
+ }
1923
+ }
1924
+ }
1925
+
1926
+ std::shared_ptr<DataType> unused_type;
1927
+ return unifier->GetResult(&unused_type, out_dict);
1928
+ }
1929
+
1930
+ Status WriteIndices(const ChunkedArray& data, std::shared_ptr<Array>* out_dict) {
1931
+ DCHECK_GT(data.num_chunks(), 0);
1932
+
1933
+ // Sniff the first chunk
1934
+ const auto& arr_first = checked_cast<const DictionaryArray&>(*data.chunk(0));
1935
+ const auto indices_first = std::static_pointer_cast<ArrayType>(arr_first.indices());
1936
+
1937
+ if (data.num_chunks() == 1 && indices_first->null_count() == 0) {
1938
+ RETURN_NOT_OK(
1939
+ CheckIndexBounds(*indices_first->data(), arr_first.dictionary()->length()));
1940
+
1941
+ PyObject* wrapped;
1942
+ npy_intp dims[1] = {static_cast<npy_intp>(this->num_rows_)};
1943
+ RETURN_NOT_OK(MakeNumPyView(indices_first, /*py_ref=*/nullptr, TRAITS::npy_type,
1944
+ /*ndim=*/1, dims, &wrapped));
1945
+ this->SetBlockData(wrapped);
1946
+ *out_dict = arr_first.dictionary();
1947
+ } else {
1948
+ RETURN_NOT_OK(this->CheckNotZeroCopyOnly(data));
1949
+ if (NeedDictionaryUnification(data)) {
1950
+ RETURN_NOT_OK(WriteIndicesVarying(data, out_dict));
1951
+ } else {
1952
+ RETURN_NOT_OK(WriteIndicesUniform(data));
1953
+ *out_dict = arr_first.dictionary();
1954
+ }
1955
+ }
1956
+ return Status::OK();
1957
+ }
1958
+
1959
+ OwnedRefNoGIL dictionary_;
1960
+ bool ordered_;
1961
+ bool needs_copy_;
1962
+ };
1963
+
1964
+ class ExtensionWriter : public PandasWriter {
1965
+ public:
1966
+ using PandasWriter::PandasWriter;
1967
+
1968
+ Status Allocate() override {
1969
+ // no-op
1970
+ return Status::OK();
1971
+ }
1972
+
1973
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
1974
+ PyAcquireGIL lock;
1975
+ PyObject* py_array;
1976
+ py_array = wrap_chunked_array(data);
1977
+ py_array_.reset(py_array);
1978
+
1979
+ return Status::OK();
1980
+ }
1981
+
1982
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1983
+ return TransferSingle(data, nullptr);
1984
+ }
1985
+
1986
+ Status GetDataFrameResult(PyObject** out) override {
1987
+ PyAcquireGIL lock;
1988
+ PyObject* result = PyDict_New();
1989
+ RETURN_IF_PYERROR();
1990
+
1991
+ PyDict_SetItemString(result, "py_array", py_array_.obj());
1992
+ PyDict_SetItemString(result, "placement", placement_arr_.obj());
1993
+ *out = result;
1994
+ return Status::OK();
1995
+ }
1996
+
1997
+ Status GetSeriesResult(PyObject** out) override {
1998
+ *out = py_array_.detach();
1999
+ return Status::OK();
2000
+ }
2001
+
2002
+ protected:
2003
+ OwnedRefNoGIL py_array_;
2004
+ };
2005
+
2006
+ Status MakeWriter(const PandasOptions& options, PandasWriter::type writer_type,
2007
+ const DataType& type, int64_t num_rows, int num_columns,
2008
+ std::shared_ptr<PandasWriter>* writer) {
2009
+ #define BLOCK_CASE(NAME, TYPE) \
2010
+ case PandasWriter::NAME: \
2011
+ *writer = std::make_shared<TYPE>(options, num_rows, num_columns); \
2012
+ break;
2013
+
2014
+ #define CATEGORICAL_CASE(TYPE) \
2015
+ case TYPE::type_id: \
2016
+ *writer = std::make_shared<CategoricalWriter<TYPE>>(options, num_rows); \
2017
+ break;
2018
+
2019
+ #define TZ_CASE(NAME, TYPE) \
2020
+ case PandasWriter::NAME: { \
2021
+ const auto& ts_type = checked_cast<const TimestampType&>(type); \
2022
+ *writer = std::make_shared<TYPE>(options, ts_type.timezone(), num_rows); \
2023
+ } break;
2024
+
2025
+ switch (writer_type) {
2026
+ case PandasWriter::CATEGORICAL: {
2027
+ const auto& index_type = *checked_cast<const DictionaryType&>(type).index_type();
2028
+ switch (index_type.id()) {
2029
+ CATEGORICAL_CASE(Int8Type);
2030
+ CATEGORICAL_CASE(Int16Type);
2031
+ CATEGORICAL_CASE(Int32Type);
2032
+ CATEGORICAL_CASE(Int64Type);
2033
+ case Type::UINT8:
2034
+ case Type::UINT16:
2035
+ case Type::UINT32:
2036
+ case Type::UINT64:
2037
+ return Status::TypeError(
2038
+ "Converting unsigned dictionary indices to pandas",
2039
+ " not yet supported, index type: ", index_type.ToString());
2040
+ default:
2041
+ // Unreachable
2042
+ DCHECK(false);
2043
+ break;
2044
+ }
2045
+ } break;
2046
+ case PandasWriter::EXTENSION:
2047
+ *writer = std::make_shared<ExtensionWriter>(options, num_rows, num_columns);
2048
+ break;
2049
+ BLOCK_CASE(OBJECT, ObjectWriter);
2050
+ BLOCK_CASE(UINT8, UInt8Writer);
2051
+ BLOCK_CASE(INT8, Int8Writer);
2052
+ BLOCK_CASE(UINT16, UInt16Writer);
2053
+ BLOCK_CASE(INT16, Int16Writer);
2054
+ BLOCK_CASE(UINT32, UInt32Writer);
2055
+ BLOCK_CASE(INT32, Int32Writer);
2056
+ BLOCK_CASE(UINT64, UInt64Writer);
2057
+ BLOCK_CASE(INT64, Int64Writer);
2058
+ BLOCK_CASE(HALF_FLOAT, Float16Writer);
2059
+ BLOCK_CASE(FLOAT, Float32Writer);
2060
+ BLOCK_CASE(DOUBLE, Float64Writer);
2061
+ BLOCK_CASE(BOOL, BoolWriter);
2062
+ BLOCK_CASE(DATETIME_DAY, DatetimeDayWriter);
2063
+ BLOCK_CASE(DATETIME_SECOND, DatetimeSecondWriter);
2064
+ BLOCK_CASE(DATETIME_MILLI, DatetimeMilliWriter);
2065
+ BLOCK_CASE(DATETIME_MICRO, DatetimeMicroWriter);
2066
+ BLOCK_CASE(DATETIME_NANO, DatetimeNanoWriter);
2067
+ BLOCK_CASE(TIMEDELTA_SECOND, TimedeltaSecondWriter);
2068
+ BLOCK_CASE(TIMEDELTA_MILLI, TimedeltaMilliWriter);
2069
+ BLOCK_CASE(TIMEDELTA_MICRO, TimedeltaMicroWriter);
2070
+ BLOCK_CASE(TIMEDELTA_NANO, TimedeltaNanoWriter);
2071
+ TZ_CASE(DATETIME_SECOND_TZ, DatetimeSecondTZWriter);
2072
+ TZ_CASE(DATETIME_MILLI_TZ, DatetimeMilliTZWriter);
2073
+ TZ_CASE(DATETIME_MICRO_TZ, DatetimeMicroTZWriter);
2074
+ TZ_CASE(DATETIME_NANO_TZ, DatetimeNanoTZWriter);
2075
+ default:
2076
+ return Status::NotImplemented("Unsupported block type");
2077
+ }
2078
+
2079
+ #undef BLOCK_CASE
2080
+ #undef CATEGORICAL_CASE
2081
+
2082
+ return Status::OK();
2083
+ }
2084
+
2085
+ static Status GetPandasWriterType(const ChunkedArray& data, const PandasOptions& options,
2086
+ PandasWriter::type* output_type) {
2087
+ #define INTEGER_CASE(NAME) \
2088
+ *output_type = \
2089
+ data.null_count() > 0 \
2090
+ ? options.integer_object_nulls ? PandasWriter::OBJECT : PandasWriter::DOUBLE \
2091
+ : PandasWriter::NAME; \
2092
+ break;
2093
+
2094
+ switch (data.type()->id()) {
2095
+ case Type::BOOL:
2096
+ *output_type = data.null_count() > 0 ? PandasWriter::OBJECT : PandasWriter::BOOL;
2097
+ break;
2098
+ case Type::UINT8:
2099
+ INTEGER_CASE(UINT8);
2100
+ case Type::INT8:
2101
+ INTEGER_CASE(INT8);
2102
+ case Type::UINT16:
2103
+ INTEGER_CASE(UINT16);
2104
+ case Type::INT16:
2105
+ INTEGER_CASE(INT16);
2106
+ case Type::UINT32:
2107
+ INTEGER_CASE(UINT32);
2108
+ case Type::INT32:
2109
+ INTEGER_CASE(INT32);
2110
+ case Type::UINT64:
2111
+ INTEGER_CASE(UINT64);
2112
+ case Type::INT64:
2113
+ INTEGER_CASE(INT64);
2114
+ case Type::HALF_FLOAT:
2115
+ *output_type = PandasWriter::HALF_FLOAT;
2116
+ break;
2117
+ case Type::FLOAT:
2118
+ *output_type = PandasWriter::FLOAT;
2119
+ break;
2120
+ case Type::DOUBLE:
2121
+ *output_type = PandasWriter::DOUBLE;
2122
+ break;
2123
+ case Type::STRING: // fall through
2124
+ case Type::LARGE_STRING: // fall through
2125
+ case Type::STRING_VIEW: // fall through
2126
+ case Type::BINARY: // fall through
2127
+ case Type::LARGE_BINARY:
2128
+ case Type::BINARY_VIEW:
2129
+ case Type::NA: // fall through
2130
+ case Type::FIXED_SIZE_BINARY: // fall through
2131
+ case Type::STRUCT: // fall through
2132
+ case Type::TIME32: // fall through
2133
+ case Type::TIME64: // fall through
2134
+ case Type::DECIMAL128: // fall through
2135
+ case Type::DECIMAL256: // fall through
2136
+ case Type::INTERVAL_MONTH_DAY_NANO: // fall through
2137
+ *output_type = PandasWriter::OBJECT;
2138
+ break;
2139
+ case Type::DATE32:
2140
+ if (options.date_as_object) {
2141
+ *output_type = PandasWriter::OBJECT;
2142
+ } else if (options.coerce_temporal_nanoseconds) {
2143
+ *output_type = PandasWriter::DATETIME_NANO;
2144
+ } else if (options.to_numpy) {
2145
+ // Numpy supports Day, but Pandas does not
2146
+ *output_type = PandasWriter::DATETIME_DAY;
2147
+ } else {
2148
+ *output_type = PandasWriter::DATETIME_MILLI;
2149
+ }
2150
+ break;
2151
+ case Type::DATE64:
2152
+ if (options.date_as_object) {
2153
+ *output_type = PandasWriter::OBJECT;
2154
+ } else if (options.coerce_temporal_nanoseconds) {
2155
+ *output_type = PandasWriter::DATETIME_NANO;
2156
+ } else {
2157
+ *output_type = PandasWriter::DATETIME_MILLI;
2158
+ }
2159
+ break;
2160
+ case Type::TIMESTAMP: {
2161
+ const auto& ts_type = checked_cast<const TimestampType&>(*data.type());
2162
+ if (options.timestamp_as_object && ts_type.unit() != TimeUnit::NANO) {
2163
+ // Nanoseconds are never out of bounds for pandas, so in that case
2164
+ // we don't convert to object
2165
+ *output_type = PandasWriter::OBJECT;
2166
+ } else if (options.coerce_temporal_nanoseconds) {
2167
+ if (!ts_type.timezone().empty()) {
2168
+ *output_type = PandasWriter::DATETIME_NANO_TZ;
2169
+ } else {
2170
+ *output_type = PandasWriter::DATETIME_NANO;
2171
+ }
2172
+ } else {
2173
+ if (!ts_type.timezone().empty()) {
2174
+ switch (ts_type.unit()) {
2175
+ case TimeUnit::SECOND:
2176
+ *output_type = PandasWriter::DATETIME_SECOND_TZ;
2177
+ break;
2178
+ case TimeUnit::MILLI:
2179
+ *output_type = PandasWriter::DATETIME_MILLI_TZ;
2180
+ break;
2181
+ case TimeUnit::MICRO:
2182
+ *output_type = PandasWriter::DATETIME_MICRO_TZ;
2183
+ break;
2184
+ case TimeUnit::NANO:
2185
+ *output_type = PandasWriter::DATETIME_NANO_TZ;
2186
+ break;
2187
+ }
2188
+ } else {
2189
+ switch (ts_type.unit()) {
2190
+ case TimeUnit::SECOND:
2191
+ *output_type = PandasWriter::DATETIME_SECOND;
2192
+ break;
2193
+ case TimeUnit::MILLI:
2194
+ *output_type = PandasWriter::DATETIME_MILLI;
2195
+ break;
2196
+ case TimeUnit::MICRO:
2197
+ *output_type = PandasWriter::DATETIME_MICRO;
2198
+ break;
2199
+ case TimeUnit::NANO:
2200
+ *output_type = PandasWriter::DATETIME_NANO;
2201
+ break;
2202
+ }
2203
+ }
2204
+ }
2205
+ } break;
2206
+ case Type::DURATION: {
2207
+ const auto& dur_type = checked_cast<const DurationType&>(*data.type());
2208
+ if (options.coerce_temporal_nanoseconds) {
2209
+ *output_type = PandasWriter::TIMEDELTA_NANO;
2210
+ } else {
2211
+ switch (dur_type.unit()) {
2212
+ case TimeUnit::SECOND:
2213
+ *output_type = PandasWriter::TIMEDELTA_SECOND;
2214
+ break;
2215
+ case TimeUnit::MILLI:
2216
+ *output_type = PandasWriter::TIMEDELTA_MILLI;
2217
+ break;
2218
+ case TimeUnit::MICRO:
2219
+ *output_type = PandasWriter::TIMEDELTA_MICRO;
2220
+ break;
2221
+ case TimeUnit::NANO:
2222
+ *output_type = PandasWriter::TIMEDELTA_NANO;
2223
+ break;
2224
+ }
2225
+ }
2226
+ } break;
2227
+ case Type::FIXED_SIZE_LIST:
2228
+ case Type::LIST:
2229
+ case Type::LARGE_LIST:
2230
+ case Type::LIST_VIEW:
2231
+ case Type::LARGE_LIST_VIEW:
2232
+ case Type::MAP: {
2233
+ auto list_type = std::static_pointer_cast<BaseListType>(data.type());
2234
+ if (!ListTypeSupported(*list_type->value_type())) {
2235
+ return Status::NotImplemented("Not implemented type for Arrow list to pandas: ",
2236
+ list_type->value_type()->ToString());
2237
+ }
2238
+ *output_type = PandasWriter::OBJECT;
2239
+ } break;
2240
+ case Type::DICTIONARY:
2241
+ *output_type = PandasWriter::CATEGORICAL;
2242
+ break;
2243
+ case Type::EXTENSION:
2244
+ *output_type = PandasWriter::EXTENSION;
2245
+ break;
2246
+ default:
2247
+ return Status::NotImplemented(
2248
+ "No known equivalent Pandas block for Arrow data of type ",
2249
+ data.type()->ToString(), " is known.");
2250
+ }
2251
+ return Status::OK();
2252
+ }
2253
+
2254
+ // Construct the exact pandas "BlockManager" memory layout
2255
+ //
2256
+ // * For each column determine the correct output pandas type
2257
+ // * Allocate 2D blocks (ncols x nrows) for each distinct data type in output
2258
+ // * Allocate block placement arrays
2259
+ // * Write Arrow columns out into each slice of memory; populate block
2260
+ //   placement arrays as we go
2261
+ class PandasBlockCreator {
2262
+ public:
2263
+ using WriterMap = std::unordered_map<int, std::shared_ptr<PandasWriter>>;
2264
+
2265
+ explicit PandasBlockCreator(const PandasOptions& options, FieldVector fields,
2266
+ ChunkedArrayVector arrays)
2267
+ : options_(options), fields_(std::move(fields)), arrays_(std::move(arrays)) {
2268
+ num_columns_ = static_cast<int>(arrays_.size());
2269
+ if (num_columns_ > 0) {
2270
+ num_rows_ = arrays_[0]->length();
2271
+ }
2272
+ column_block_placement_.resize(num_columns_);
2273
+ }
2274
+ virtual ~PandasBlockCreator() = default;
2275
+
2276
+ virtual Status Convert(PyObject** out) = 0;
2277
+
2278
+ Status AppendBlocks(const WriterMap& blocks, PyObject* list) {
2279
+ for (const auto& it : blocks) {
2280
+ PyObject* item;
2281
+ RETURN_NOT_OK(it.second->GetDataFrameResult(&item));
2282
+ if (PyList_Append(list, item) < 0) {
2283
+ RETURN_IF_PYERROR();
2284
+ }
2285
+
2286
+ // ARROW-1017; PyList_Append increments object refcount
2287
+ Py_DECREF(item);
2288
+ }
2289
+ return Status::OK();
2290
+ }
2291
+
2292
+ protected:
2293
+ PandasOptions options_;
2294
+
2295
+ FieldVector fields_;
2296
+ ChunkedArrayVector arrays_;
2297
+ int num_columns_;
2298
+ int64_t num_rows_;
2299
+
2300
+ // column num -> relative placement within internal block
2301
+ std::vector<int> column_block_placement_;
2302
+ };
2303
+
2304
+ // Helper function for extension chunked arrays
2305
+ // Constructing a storage chunked array of an extension chunked array
2306
+ std::shared_ptr<ChunkedArray> GetStorageChunkedArray(std::shared_ptr<ChunkedArray> arr) {
2307
+ auto value_type = checked_cast<const ExtensionType&>(*arr->type()).storage_type();
2308
+ ArrayVector storage_arrays;
2309
+ for (int c = 0; c < arr->num_chunks(); c++) {
2310
+ const auto& arr_ext = checked_cast<const ExtensionArray&>(*arr->chunk(c));
2311
+ storage_arrays.emplace_back(arr_ext.storage());
2312
+ }
2313
+ return std::make_shared<ChunkedArray>(std::move(storage_arrays), value_type);
2314
+ };
2315
+
2316
+ // Helper function to decode RunEndEncodedArray
2317
+ Result<std::shared_ptr<ChunkedArray>> GetDecodedChunkedArray(
2318
+ std::shared_ptr<ChunkedArray> arr) {
2319
+ ARROW_ASSIGN_OR_RAISE(Datum decoded, compute::RunEndDecode(arr));
2320
+ DCHECK(decoded.is_chunked_array());
2321
+ return decoded.chunked_array();
2322
+ };
2323
+
2324
+ class ConsolidatedBlockCreator : public PandasBlockCreator {
2325
+ public:
2326
+ using PandasBlockCreator::PandasBlockCreator;
2327
+
2328
+ Status Convert(PyObject** out) override {
2329
+ column_types_.resize(num_columns_);
2330
+ RETURN_NOT_OK(CreateBlocks());
2331
+ RETURN_NOT_OK(WriteTableToBlocks());
2332
+ PyAcquireGIL lock;
2333
+
2334
+ PyObject* result = PyList_New(0);
2335
+ RETURN_IF_PYERROR();
2336
+
2337
+ RETURN_NOT_OK(AppendBlocks(blocks_, result));
2338
+ RETURN_NOT_OK(AppendBlocks(singleton_blocks_, result));
2339
+
2340
+ *out = result;
2341
+ return Status::OK();
2342
+ }
2343
+
2344
+ Status GetBlockType(int column_index, PandasWriter::type* out) {
2345
+ if (options_.extension_columns.count(fields_[column_index]->name())) {
2346
+ *out = PandasWriter::EXTENSION;
2347
+ return Status::OK();
2348
+ } else {
2349
+ // In case of an extension array default to the storage type
2350
+ if (arrays_[column_index]->type()->id() == Type::EXTENSION) {
2351
+ arrays_[column_index] = GetStorageChunkedArray(arrays_[column_index]);
2352
+ }
2353
+ // In case of a RunEndEncodedArray default to the values type
2354
+ else if (arrays_[column_index]->type()->id() == Type::RUN_END_ENCODED) {
2355
+ ARROW_ASSIGN_OR_RAISE(arrays_[column_index],
2356
+ GetDecodedChunkedArray(arrays_[column_index]));
2357
+ }
2358
+ return GetPandasWriterType(*arrays_[column_index], options_, out);
2359
+ }
2360
+ }
2361
+
2362
+ Status CreateBlocks() {
2363
+ for (int i = 0; i < num_columns_; ++i) {
2364
+ const DataType& type = *arrays_[i]->type();
2365
+ PandasWriter::type output_type;
2366
+ RETURN_NOT_OK(GetBlockType(i, &output_type));
2367
+
2368
+ int block_placement = 0;
2369
+ std::shared_ptr<PandasWriter> writer;
2370
+ if (output_type == PandasWriter::CATEGORICAL ||
2371
+ output_type == PandasWriter::DATETIME_SECOND_TZ ||
2372
+ output_type == PandasWriter::DATETIME_MILLI_TZ ||
2373
+ output_type == PandasWriter::DATETIME_MICRO_TZ ||
2374
+ output_type == PandasWriter::DATETIME_NANO_TZ ||
2375
+ output_type == PandasWriter::EXTENSION) {
2376
+ RETURN_NOT_OK(MakeWriter(options_, output_type, type, num_rows_,
2377
+ /*num_columns=*/1, &writer));
2378
+ singleton_blocks_[i] = writer;
2379
+ } else {
2380
+ auto it = block_sizes_.find(output_type);
2381
+ if (it != block_sizes_.end()) {
2382
+ block_placement = it->second;
2383
+ // Increment count
2384
+ ++it->second;
2385
+ } else {
2386
+ // Add key to map
2387
+ block_sizes_[output_type] = 1;
2388
+ }
2389
+ }
2390
+ column_types_[i] = output_type;
2391
+ column_block_placement_[i] = block_placement;
2392
+ }
2393
+
2394
+ // Create normal non-categorical blocks
2395
+ for (const auto& it : this->block_sizes_) {
2396
+ PandasWriter::type output_type = static_cast<PandasWriter::type>(it.first);
2397
+ std::shared_ptr<PandasWriter> block;
2398
+ RETURN_NOT_OK(MakeWriter(this->options_, output_type, /*unused*/ *null(), num_rows_,
2399
+ it.second, &block));
2400
+ this->blocks_[output_type] = block;
2401
+ }
2402
+ return Status::OK();
2403
+ }
2404
+
2405
+ Status GetWriter(int i, std::shared_ptr<PandasWriter>* block) {
2406
+ PandasWriter::type output_type = this->column_types_[i];
2407
+ switch (output_type) {
2408
+ case PandasWriter::CATEGORICAL:
2409
+ case PandasWriter::DATETIME_SECOND_TZ:
2410
+ case PandasWriter::DATETIME_MILLI_TZ:
2411
+ case PandasWriter::DATETIME_MICRO_TZ:
2412
+ case PandasWriter::DATETIME_NANO_TZ:
2413
+ case PandasWriter::EXTENSION: {
2414
+ auto it = this->singleton_blocks_.find(i);
2415
+ if (it == this->singleton_blocks_.end()) {
2416
+ return Status::KeyError("No block allocated");
2417
+ }
2418
+ *block = it->second;
2419
+ } break;
2420
+ default:
2421
+ auto it = this->blocks_.find(output_type);
2422
+ if (it == this->blocks_.end()) {
2423
+ return Status::KeyError("No block allocated");
2424
+ }
2425
+ *block = it->second;
2426
+ break;
2427
+ }
2428
+ return Status::OK();
2429
+ }
2430
+
2431
+ Status WriteTableToBlocks() {
2432
+ auto WriteColumn = [this](int i) {
2433
+ std::shared_ptr<PandasWriter> block;
2434
+ RETURN_NOT_OK(this->GetWriter(i, &block));
2435
+ // ARROW-3789 Use std::move on the array to permit self-destructing
2436
+ return block->Write(std::move(arrays_[i]), i, this->column_block_placement_[i]);
2437
+ };
2438
+
2439
+ return OptionalParallelFor(options_.use_threads, num_columns_, WriteColumn);
2440
+ }
2441
+
2442
+ private:
2443
+ // column num -> block type id
2444
+ std::vector<PandasWriter::type> column_types_;
2445
+
2446
+ // block type -> type count
2447
+ std::unordered_map<int, int> block_sizes_;
2448
+ std::unordered_map<int, const DataType*> block_types_;
2449
+
2450
+ // block type -> block
2451
+ WriterMap blocks_;
2452
+
2453
+ WriterMap singleton_blocks_;
2454
+ };
2455
+
2456
+ /// \brief Create blocks for pandas.DataFrame block manager using one block per
2457
+ /// column strategy. This permits some zero-copy optimizations as well as the
2458
+ /// ability for the table to "self-destruct" if selected by the user.
2459
+ class SplitBlockCreator : public PandasBlockCreator {
2460
+ public:
2461
+ using PandasBlockCreator::PandasBlockCreator;
2462
+
2463
+ Status GetWriter(int i, std::shared_ptr<PandasWriter>* writer) {
2464
+ PandasWriter::type output_type = PandasWriter::OBJECT;
2465
+ const DataType& type = *arrays_[i]->type();
2466
+ if (options_.extension_columns.count(fields_[i]->name())) {
2467
+ output_type = PandasWriter::EXTENSION;
2468
+ } else {
2469
+ // Null count needed to determine output type
2470
+ RETURN_NOT_OK(GetPandasWriterType(*arrays_[i], options_, &output_type));
2471
+ }
2472
+ return MakeWriter(this->options_, output_type, type, num_rows_, 1, writer);
2473
+ }
2474
+
2475
+ Status Convert(PyObject** out) override {
2476
+ PyAcquireGIL lock;
2477
+
2478
+ PyObject* result = PyList_New(0);
2479
+ RETURN_IF_PYERROR();
2480
+
2481
+ for (int i = 0; i < num_columns_; ++i) {
2482
+ std::shared_ptr<PandasWriter> writer;
2483
+ RETURN_NOT_OK(GetWriter(i, &writer));
2484
+ // ARROW-3789 Use std::move on the array to permit self-destructing
2485
+ RETURN_NOT_OK(writer->Write(std::move(arrays_[i]), i, /*rel_placement=*/0));
2486
+
2487
+ PyObject* item;
2488
+ RETURN_NOT_OK(writer->GetDataFrameResult(&item));
2489
+ if (PyList_Append(result, item) < 0) {
2490
+ RETURN_IF_PYERROR();
2491
+ }
2492
+ // PyList_Append increments object refcount
2493
+ Py_DECREF(item);
2494
+ }
2495
+
2496
+ *out = result;
2497
+ return Status::OK();
2498
+ }
2499
+
2500
+ private:
2501
+ std::vector<std::shared_ptr<PandasWriter>> writers_;
2502
+ };
2503
+
2504
+ Status ConvertCategoricals(const PandasOptions& options, ChunkedArrayVector* arrays,
2505
+ FieldVector* fields) {
2506
+ std::vector<int> columns_to_encode;
2507
+
2508
+ // For Categorical conversions
2509
+ auto EncodeColumn = [&](int j) {
2510
+ int i = columns_to_encode[j];
2511
+ if (options.zero_copy_only) {
2512
+ return Status::Invalid("Need to dictionary encode a column, but ",
2513
+ "only zero-copy conversions allowed");
2514
+ }
2515
+ compute::ExecContext ctx(options.pool);
2516
+ ARROW_ASSIGN_OR_RAISE(
2517
+ Datum out, DictionaryEncode((*arrays)[i],
2518
+ compute::DictionaryEncodeOptions::Defaults(), &ctx));
2519
+ (*arrays)[i] = out.chunked_array();
2520
+ (*fields)[i] = (*fields)[i]->WithType((*arrays)[i]->type());
2521
+ return Status::OK();
2522
+ };
2523
+
2524
+ if (!options.categorical_columns.empty()) {
2525
+ for (int i = 0; i < static_cast<int>(arrays->size()); i++) {
2526
+ if ((*arrays)[i]->type()->id() != Type::DICTIONARY &&
2527
+ options.categorical_columns.count((*fields)[i]->name())) {
2528
+ columns_to_encode.push_back(i);
2529
+ }
2530
+ }
2531
+ }
2532
+ if (options.strings_to_categorical) {
2533
+ for (int i = 0; i < static_cast<int>(arrays->size()); i++) {
2534
+ if (is_base_binary_like((*arrays)[i]->type()->id())) {
2535
+ columns_to_encode.push_back(i);
2536
+ }
2537
+ }
2538
+ }
2539
+ return OptionalParallelFor(options.use_threads,
2540
+ static_cast<int>(columns_to_encode.size()), EncodeColumn);
2541
+ }
2542
+
2543
+ } // namespace
2544
+
2545
+ Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
2546
+ PyObject* py_ref, PyObject** out) {
2547
+ return ConvertChunkedArrayToPandas(
2548
+ options, std::make_shared<ChunkedArray>(std::move(arr)), py_ref, out);
2549
+ }
2550
+
2551
+ Status ConvertChunkedArrayToPandas(const PandasOptions& options,
2552
+ std::shared_ptr<ChunkedArray> arr, PyObject* py_ref,
2553
+ PyObject** out) {
2554
+ if (options.decode_dictionaries && arr->type()->id() == Type::DICTIONARY) {
2555
+ // XXX we should return an error as below if options.zero_copy_only
2556
+ // is true, but that would break compatibility with existing tests.
2557
+ const auto& dense_type =
2558
+ checked_cast<const DictionaryType&>(*arr->type()).value_type();
2559
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &arr));
2560
+ DCHECK_NE(arr->type()->id(), Type::DICTIONARY);
2561
+
2562
+ // The original Python DictionaryArray won't own the memory anymore
2563
+ // as we actually built a new array when we decoded the DictionaryArray
2564
+ // so we let the final resulting numpy array own the memory through a Capsule
2565
+ py_ref = nullptr;
2566
+ }
2567
+
2568
+ if (options.strings_to_categorical && is_base_binary_like(arr->type()->id())) {
2569
+ if (options.zero_copy_only) {
2570
+ return Status::Invalid("Need to dictionary encode a column, but ",
2571
+ "only zero-copy conversions allowed");
2572
+ }
2573
+ compute::ExecContext ctx(options.pool);
2574
+ ARROW_ASSIGN_OR_RAISE(
2575
+ Datum out,
2576
+ DictionaryEncode(arr, compute::DictionaryEncodeOptions::Defaults(), &ctx));
2577
+ arr = out.chunked_array();
2578
+ }
2579
+
2580
+ PandasOptions modified_options = options;
2581
+ modified_options.strings_to_categorical = false;
2582
+
2583
+ // ARROW-7596: We permit the hybrid Series/DataFrame code path to do zero copy
2584
+ // optimizations that we do not allow in the default case when converting
2585
+ // Table->DataFrame
2586
+ modified_options.allow_zero_copy_blocks = true;
2587
+
2588
+ // In case of an extension array default to the storage type
2589
+ if (arr->type()->id() == Type::EXTENSION) {
2590
+ arr = GetStorageChunkedArray(arr);
2591
+ }
2592
+ // In case of a RunEndEncodedArray decode the array
2593
+ else if (arr->type()->id() == Type::RUN_END_ENCODED) {
2594
+ if (options.zero_copy_only) {
2595
+ return Status::Invalid("Need to dencode a RunEndEncodedArray, but ",
2596
+ "only zero-copy conversions allowed");
2597
+ }
2598
+ ARROW_ASSIGN_OR_RAISE(arr, GetDecodedChunkedArray(arr));
2599
+
2600
+ // Because we built a new array when we decoded the RunEndEncodedArray
2601
+ // the final resulting numpy array should own the memory through a Capsule
2602
+ py_ref = nullptr;
2603
+ }
2604
+
2605
+ PandasWriter::type output_type;
2606
+ RETURN_NOT_OK(GetPandasWriterType(*arr, modified_options, &output_type));
2607
+ if (options.decode_dictionaries) {
2608
+ DCHECK_NE(output_type, PandasWriter::CATEGORICAL);
2609
+ }
2610
+
2611
+ std::shared_ptr<PandasWriter> writer;
2612
+ RETURN_NOT_OK(MakeWriter(modified_options, output_type, *arr->type(), arr->length(),
2613
+ /*num_columns=*/1, &writer));
2614
+ RETURN_NOT_OK(writer->TransferSingle(std::move(arr), py_ref));
2615
+ return writer->GetSeriesResult(out);
2616
+ }
2617
+
2618
+ Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
2619
+ PyObject** out) {
2620
+ ChunkedArrayVector arrays = table->columns();
2621
+ FieldVector fields = table->fields();
2622
+
2623
+ // ARROW-3789: allow "self-destructing" by releasing references to columns as
2624
+ // we convert them to pandas
2625
+ table = nullptr;
2626
+
2627
+ RETURN_NOT_OK(ConvertCategoricals(options, &arrays, &fields));
2628
+
2629
+ PandasOptions modified_options = options;
2630
+ modified_options.strings_to_categorical = false;
2631
+ modified_options.categorical_columns.clear();
2632
+
2633
+ if (options.split_blocks) {
2634
+ modified_options.allow_zero_copy_blocks = true;
2635
+ SplitBlockCreator helper(modified_options, std::move(fields), std::move(arrays));
2636
+ return helper.Convert(out);
2637
+ } else {
2638
+ ConsolidatedBlockCreator helper(modified_options, std::move(fields),
2639
+ std::move(arrays));
2640
+ return helper.Convert(out);
2641
+ }
2642
+ }
2643
+
2644
+ } // namespace py
2645
+ } // namespace arrow
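Editorial note (not part of the commit): a minimal sketch of how the Table-to-pandas entry point defined above might be driven from C++. TableToPandasBlocks is a hypothetical helper name, and the caller is assumed to already hold the GIL.

#include "arrow/python/arrow_to_pandas.h"
#include "arrow/table.h"

// Hypothetical helper; assumes the GIL is held by the caller.
arrow::Status TableToPandasBlocks(std::shared_ptr<arrow::Table> table, PyObject** out) {
  arrow::py::PandasOptions options;
  options.use_threads = true;    // parallelize the per-column writes
  options.split_blocks = false;  // consolidated (one block per dtype) layout
  // On success, *out holds a Python list describing the allocated blocks and
  // their column placements; pyarrow's Cython layer assembles this into a
  // pandas.DataFrame.
  return arrow::py::ConvertTableToPandas(options, std::move(table), out);
}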
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.h ADDED
@@ -0,0 +1,146 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between pandas's NumPy-based data representation
19
+ // and Arrow data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+ #include <string>
27
+ #include <unordered_set>
28
+
29
+ #include "arrow/memory_pool.h"
30
+ #include "arrow/python/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ class Array;
35
+ class ChunkedArray;
36
+ class Column;
37
+ class DataType;
38
+ class MemoryPool;
39
+ class Status;
40
+ class Table;
41
+
42
+ namespace py {
43
+
44
+ enum class MapConversionType {
45
+ DEFAULT, // convert Arrow maps to assoc lists (lists of key-value tuples) in Pandas
46
+ LOSSY, // report warnings when lossiness is encountered due to duplicate keys
47
+ STRICT_, // raise a Python exception when lossiness is encountered due to duplicate
48
+ // keys
49
+ };
50
+
51
+ struct PandasOptions {
52
+ /// arrow::MemoryPool to use for memory allocations
53
+ MemoryPool* pool = default_memory_pool();
54
+
55
+ /// If true, we will convert all string columns to categoricals
56
+ bool strings_to_categorical = false;
57
+ bool zero_copy_only = false;
58
+ bool integer_object_nulls = false;
59
+ bool date_as_object = false;
60
+ bool timestamp_as_object = false;
61
+ bool use_threads = false;
62
+
63
+ /// Coerce all date and timestamp to datetime64[ns]
64
+ bool coerce_temporal_nanoseconds = false;
65
+
66
+ /// Used to maintain backwards compatibility for
67
+ /// timezone bugs (see ARROW-9528). Should be removed
68
+ /// after Arrow 2.0 release.
69
+ bool ignore_timezone = false;
70
+
71
+ /// \brief If true, do not create duplicate PyObject versions of equal
72
+ /// objects. This only applies to immutable objects like strings or datetime
73
+ /// objects
74
+ bool deduplicate_objects = false;
75
+
76
+ /// \brief For certain data types, a cast is needed in order to store the
77
+ /// data in a pandas DataFrame or Series (e.g. timestamps are always stored
78
+ /// as nanoseconds in pandas). This option controls whether it is a safe
79
+ /// cast or not.
80
+ bool safe_cast = true;
81
+
82
+ /// \brief If true, create one block per column rather than consolidated
83
+ /// blocks (1 per data type). Do zero-copy wrapping when there are no
84
+ /// nulls. pandas currently will consolidate the blocks on its own, causing
85
+ /// increased memory use, so keep this in mind if you are working in a
86
+ /// memory-constrained situation.
87
+ bool split_blocks = false;
88
+
89
+ /// \brief If true, allow non-writable zero-copy views to be created for
90
+ /// single column blocks. This option is also used to provide zero copy for
91
+ /// Series data
92
+ bool allow_zero_copy_blocks = false;
93
+
94
+ /// \brief If true, attempt to deallocate buffers in passed Arrow object if
95
+ /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for
96
+ /// original context for this feature. Only currently implemented for Table
97
+ /// conversions
98
+ bool self_destruct = false;
99
+
100
+ /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to
101
+ /// Python association lists (list-of-tuples) in the same order as the Arrow
102
+ /// Map, as in [(key1, value1), (key2, value2), ...]
103
+ /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts.
104
+ /// This can change the ordering of (key, value) pairs, and will deduplicate
105
+ /// multiple keys, resulting in a possible loss of data.
106
+ /// If 'lossy', this key deduplication results in a warning printed
107
+ /// when detected. If 'strict', this instead results in an exception
108
+ /// being raised when detected.
109
+ MapConversionType maps_as_pydicts = MapConversionType::DEFAULT;
110
+
111
+ // Used internally for nested arrays.
112
+ bool decode_dictionaries = false;
113
+
114
+ // Columns that should be casted to categorical
115
+ std::unordered_set<std::string> categorical_columns;
116
+
117
+ // Columns that should be passed through to be converted to
118
+ // ExtensionArray/Block
119
+ std::unordered_set<std::string> extension_columns;
120
+
121
+ // Used internally to decipher between to_numpy() and to_pandas() when
122
+ // the expected output differs
123
+ bool to_numpy = false;
124
+ };
125
+
126
+ ARROW_PYTHON_EXPORT
127
+ Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
128
+ PyObject* py_ref, PyObject** out);
129
+
130
+ ARROW_PYTHON_EXPORT
131
+ Status ConvertChunkedArrayToPandas(const PandasOptions& options,
132
+ std::shared_ptr<ChunkedArray> col, PyObject* py_ref,
133
+ PyObject** out);
134
+
135
+ // Convert a whole table as efficiently as possible to a pandas.DataFrame.
136
+ //
137
+ // The returned Python object is a list of tuples consisting of the exact 2D
138
+ // BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x.
139
+ //
140
+ // tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2])
141
+ ARROW_PYTHON_EXPORT
142
+ Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
143
+ PyObject** out);
144
+
145
+ } // namespace py
146
+ } // namespace arrow
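Editorial note (not part of the commit): a small sketch of configuring the PandasOptions struct declared above. MakeExampleOptions is a hypothetical helper and the column names "city" and "uuid" are placeholders for illustration only.

#include "arrow/python/arrow_to_pandas.h"

// Hypothetical configuration helper; column names are illustrative.
arrow::py::PandasOptions MakeExampleOptions() {
  arrow::py::PandasOptions opts;
  opts.use_threads = true;              // parallel column conversion
  opts.categorical_columns = {"city"};  // dictionary-encode this column
  opts.extension_columns = {"uuid"};    // pass through for an ExtensionArray/Block
  opts.maps_as_pydicts = arrow::py::MapConversionType::LOSSY;  // dicts + warnings
  return opts;
}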
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_python_internal.h ADDED
@@ -0,0 +1,49 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/array.h"
21
+ #include "arrow/python/platform.h"
22
+
23
+ namespace arrow {
24
+ namespace py {
25
+ namespace internal {
26
+ // TODO(ARROW-12976): See if we can refactor Pandas ObjectWriter logic
27
+ // to the .cc file and move this there as well if we can.
28
+
29
+ // Converts an array to a sequence of Python objects.
30
+ template <typename ArrayType, typename WriteValue, typename Assigner>
31
+ inline Status WriteArrayObjects(const ArrayType& arr, WriteValue&& write_func,
32
+ Assigner out_values) {
33
+ // TODO(ARROW-12976): Use visitor here?
34
+ const bool has_nulls = arr.null_count() > 0;
35
+ for (int64_t i = 0; i < arr.length(); ++i) {
36
+ if (has_nulls && arr.IsNull(i)) {
37
+ Py_INCREF(Py_None);
38
+ *out_values = Py_None;
39
+ } else {
40
+ RETURN_NOT_OK(write_func(arr.GetView(i), out_values));
41
+ }
42
+ ++out_values;
43
+ }
44
+ return Status::OK();
45
+ }
46
+
47
+ } // namespace internal
48
+ } // namespace py
49
+ } // namespace arrow
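Editorial note (not part of the commit): a sketch of how the WriteArrayObjects helper above could be used. Int64ToPyObjects is a hypothetical name; the caller is assumed to hold the GIL and to provide at least arr.length() output slots.

#include "arrow/array.h"
#include "arrow/python/arrow_to_python_internal.h"

// Hypothetical usage; GIL must be held and out_values must have arr.length() slots.
arrow::Status Int64ToPyObjects(const arrow::Int64Array& arr, PyObject** out_values) {
  return arrow::py::internal::WriteArrayObjects(
      arr,
      [](int64_t value, PyObject** out) {
        *out = PyLong_FromLongLong(value);  // new reference, or nullptr on error
        return *out != nullptr ? arrow::Status::OK()
                               : arrow::Status::Invalid("PyLong_FromLongLong failed");
      },
      out_values);
}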
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/async.h ADDED
@@ -0,0 +1,60 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+
22
+ #include "arrow/python/common.h"
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/future.h"
25
+
26
+ namespace arrow::py {
27
+
28
+ /// \brief Bind a Python callback to an arrow::Future.
29
+ ///
30
+ /// If the Future finishes successfully, py_wrapper is called with its
31
+ /// result value and should return a PyObject*. If py_wrapper is successful,
32
+ /// py_cb is called with its return value.
33
+ ///
34
+ /// If either the Future or py_wrapper fails, py_cb is called with the
35
+ /// associated Python exception.
36
+ ///
37
+ /// \param future The future to bind to.
38
+ /// \param py_cb The Python callback function. Will be passed the result of
39
+ /// py_wrapper, or a Python exception if the future failed or one was
40
+ /// raised by py_wrapper.
41
+ /// \param py_wrapper A function (likely defined in Cython) to convert the C++
42
+ /// result of the future to a Python object.
43
+ template <typename T, typename PyWrapper = PyObject* (*)(T)>
44
+ void BindFuture(Future<T> future, PyObject* py_cb, PyWrapper py_wrapper) {
45
+ Py_INCREF(py_cb);
46
+ OwnedRefNoGIL cb_ref(py_cb);
47
+
48
+ auto future_cb = [cb_ref = std::move(cb_ref),
49
+ py_wrapper = std::move(py_wrapper)](Result<T> result) {
50
+ SafeCallIntoPythonVoid([&]() {
51
+ OwnedRef py_value_or_exc{WrapResult(std::move(result), std::move(py_wrapper))};
52
+ Py_XDECREF(
53
+ PyObject_CallFunctionObjArgs(cb_ref.obj(), py_value_or_exc.obj(), NULLPTR));
54
+ ARROW_WARN_NOT_OK(CheckPyError(), "Internal error in async call");
55
+ });
56
+ };
57
+ future.AddCallback(std::move(future_cb));
58
+ }
59
+
60
+ } // namespace arrow::py
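Editorial note (not part of the commit): a minimal sketch of binding a Python callback to an arrow::Future with the BindFuture template above. BindRowCountFuture is a hypothetical name; the wrapper simply boxes the C++ value as a Python int.

#include "arrow/python/async.h"

// Hypothetical usage: py_cb is a Python callable taking one argument.
void BindRowCountFuture(arrow::Future<int64_t> fut, PyObject* py_cb) {
  arrow::py::BindFuture(std::move(fut), py_cb,
                        [](int64_t n) { return PyLong_FromLongLong(n); });
}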
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.cc ADDED
@@ -0,0 +1,38 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/benchmark.h"
19
+ #include "arrow/python/helpers.h"
20
+
21
+ namespace arrow {
22
+ namespace py {
23
+ namespace benchmark {
24
+
25
+ void Benchmark_PandasObjectIsNull(PyObject* list) {
26
+ if (!PyList_CheckExact(list)) {
27
+ PyErr_SetString(PyExc_TypeError, "expected a list");
28
+ return;
29
+ }
30
+ Py_ssize_t i, n = PyList_GET_SIZE(list);
31
+ for (i = 0; i < n; i++) {
32
+ internal::PandasObjectIsNull(PyList_GET_ITEM(list, i));
33
+ }
34
+ }
35
+
36
+ } // namespace benchmark
37
+ } // namespace py
38
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.h ADDED
@@ -0,0 +1,36 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+
22
+ #include "arrow/python/visibility.h"
23
+
24
+ namespace arrow {
25
+ namespace py {
26
+ namespace benchmark {
27
+
28
+ // Micro-benchmark routines for use from ASV
29
+
30
+ // Run PandasObjectIsNull() once over every object in *list*
31
+ ARROW_PYTHON_EXPORT
32
+ void Benchmark_PandasObjectIsNull(PyObject* list);
33
+
34
+ } // namespace benchmark
35
+ } // namespace py
36
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.cc ADDED
@@ -0,0 +1,246 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/common.h"
19
+
20
+ #include <cstdlib>
21
+ #include <mutex>
22
+ #include <sstream>
23
+ #include <string>
24
+
25
+ #include "arrow/memory_pool.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/checked_cast.h"
28
+ #include "arrow/util/logging.h"
29
+
30
+ #include "arrow/python/helpers.h"
31
+
32
+ namespace arrow {
33
+
34
+ using internal::checked_cast;
35
+
36
+ namespace py {
37
+
38
+ static std::mutex memory_pool_mutex;
39
+ static MemoryPool* default_python_pool = nullptr;
40
+
41
+ void set_default_memory_pool(MemoryPool* pool) {
42
+ std::lock_guard<std::mutex> guard(memory_pool_mutex);
43
+ default_python_pool = pool;
44
+ }
45
+
46
+ MemoryPool* get_memory_pool() {
47
+ std::lock_guard<std::mutex> guard(memory_pool_mutex);
48
+ if (default_python_pool) {
49
+ return default_python_pool;
50
+ } else {
51
+ return default_memory_pool();
52
+ }
53
+ }
54
+
55
+ // ----------------------------------------------------------------------
56
+ // PythonErrorDetail
57
+
58
+ namespace {
59
+
60
+ const char kErrorDetailTypeId[] = "arrow::py::PythonErrorDetail";
61
+
62
+ // Try to match the Python exception type with an appropriate Status code
63
+ StatusCode MapPyError(PyObject* exc_type) {
64
+ StatusCode code;
65
+
66
+ if (PyErr_GivenExceptionMatches(exc_type, PyExc_MemoryError)) {
67
+ code = StatusCode::OutOfMemory;
68
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_IndexError)) {
69
+ code = StatusCode::IndexError;
70
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_KeyError)) {
71
+ code = StatusCode::KeyError;
72
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_TypeError)) {
73
+ code = StatusCode::TypeError;
74
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_ValueError) ||
75
+ PyErr_GivenExceptionMatches(exc_type, PyExc_OverflowError)) {
76
+ code = StatusCode::Invalid;
77
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_EnvironmentError)) {
78
+ code = StatusCode::IOError;
79
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_NotImplementedError)) {
80
+ code = StatusCode::NotImplemented;
81
+ } else {
82
+ code = StatusCode::UnknownError;
83
+ }
84
+ return code;
85
+ }
86
+
87
+ // PythonErrorDetail indicates a Python exception was raised.
88
+ class PythonErrorDetail : public StatusDetail {
89
+ public:
90
+ const char* type_id() const override { return kErrorDetailTypeId; }
91
+
92
+ std::string ToString() const override {
93
+ // This is simple enough not to need the GIL
94
+ Result<std::string> result = FormatImpl();
95
+
96
+ if (result.ok()) {
97
+ return result.ValueOrDie();
98
+ } else {
99
+ // Fallback to just the exception type
100
+ const auto ty = reinterpret_cast<const PyTypeObject*>(exc_type_.obj());
101
+ return std::string("Python exception: ") + ty->tp_name;
102
+ }
103
+ }
104
+
105
+ void RestorePyError() const {
106
+ Py_INCREF(exc_type_.obj());
107
+ Py_INCREF(exc_value_.obj());
108
+ Py_INCREF(exc_traceback_.obj());
109
+ PyErr_Restore(exc_type_.obj(), exc_value_.obj(), exc_traceback_.obj());
110
+ }
111
+
112
+ PyObject* exc_type() const { return exc_type_.obj(); }
113
+
114
+ PyObject* exc_value() const { return exc_value_.obj(); }
115
+
116
+ static std::shared_ptr<PythonErrorDetail> FromPyError() {
117
+ PyObject* exc_type = nullptr;
118
+ PyObject* exc_value = nullptr;
119
+ PyObject* exc_traceback = nullptr;
120
+
121
+ PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
122
+ PyErr_NormalizeException(&exc_type, &exc_value, &exc_traceback);
123
+ ARROW_CHECK(exc_type)
124
+ << "PythonErrorDetail::FromPyError called without a Python error set";
125
+ DCHECK(PyType_Check(exc_type));
126
+ DCHECK(exc_value); // Ensured by PyErr_NormalizeException, double-check
127
+ if (exc_traceback == nullptr) {
128
+ // Needed by PyErr_Restore()
129
+ Py_INCREF(Py_None);
130
+ exc_traceback = Py_None;
131
+ }
132
+
133
+ std::shared_ptr<PythonErrorDetail> detail(new PythonErrorDetail);
134
+ detail->exc_type_.reset(exc_type);
135
+ detail->exc_value_.reset(exc_value);
136
+ detail->exc_traceback_.reset(exc_traceback);
137
+ return detail;
138
+ }
139
+
140
+ protected:
141
+ Result<std::string> FormatImpl() const {
142
+ PyAcquireGIL lock;
143
+
144
+ // Use traceback.format_exception()
145
+ OwnedRef traceback_module;
146
+ RETURN_NOT_OK(internal::ImportModule("traceback", &traceback_module));
147
+
148
+ OwnedRef fmt_exception;
149
+ RETURN_NOT_OK(internal::ImportFromModule(traceback_module.obj(), "format_exception",
150
+ &fmt_exception));
151
+
152
+ OwnedRef formatted;
153
+ formatted.reset(PyObject_CallFunctionObjArgs(fmt_exception.obj(), exc_type_.obj(),
154
+ exc_value_.obj(), exc_traceback_.obj(),
155
+ NULL));
156
+ RETURN_IF_PYERROR();
157
+
158
+ std::stringstream ss;
159
+ ss << "Python exception: ";
160
+ Py_ssize_t num_lines = PySequence_Length(formatted.obj());
161
+ RETURN_IF_PYERROR();
162
+
163
+ for (Py_ssize_t i = 0; i < num_lines; ++i) {
164
+ Py_ssize_t line_size;
165
+
166
+ PyObject* line = PySequence_GetItem(formatted.obj(), i);
167
+ RETURN_IF_PYERROR();
168
+
169
+ const char* data = PyUnicode_AsUTF8AndSize(line, &line_size);
170
+ RETURN_IF_PYERROR();
171
+
172
+ ss << std::string_view(data, line_size);
173
+ }
174
+ return ss.str();
175
+ }
176
+
177
+ PythonErrorDetail() = default;
178
+
179
+ OwnedRefNoGIL exc_type_, exc_value_, exc_traceback_;
180
+ };
181
+
182
+ } // namespace
183
+
184
+ // ----------------------------------------------------------------------
185
+ // Python exception <-> Status
186
+
187
+ Status ConvertPyError(StatusCode code) {
188
+ auto detail = PythonErrorDetail::FromPyError();
189
+ if (code == StatusCode::UnknownError) {
190
+ code = MapPyError(detail->exc_type());
191
+ }
192
+
193
+ std::string message;
194
+ RETURN_NOT_OK(internal::PyObject_StdStringStr(detail->exc_value(), &message));
195
+ return Status(code, message, detail);
196
+ }
197
+
198
+ bool IsPyError(const Status& status) {
199
+ if (status.ok()) {
200
+ return false;
201
+ }
202
+ auto detail = status.detail();
203
+ bool result = detail != nullptr && detail->type_id() == kErrorDetailTypeId;
204
+ return result;
205
+ }
206
+
207
+ void RestorePyError(const Status& status) {
208
+ ARROW_CHECK(IsPyError(status));
209
+ const auto& detail = checked_cast<const PythonErrorDetail&>(*status.detail());
210
+ detail.RestorePyError();
211
+ }
212
+
213
+ // ----------------------------------------------------------------------
214
+ // PyBuffer
215
+
216
+ PyBuffer::PyBuffer() : Buffer(nullptr, 0) {}
217
+
218
+ Status PyBuffer::Init(PyObject* obj) {
219
+ if (!PyObject_GetBuffer(obj, &py_buf_, PyBUF_ANY_CONTIGUOUS)) {
220
+ data_ = reinterpret_cast<const uint8_t*>(py_buf_.buf);
221
+ ARROW_CHECK_NE(data_, nullptr) << "Null pointer in Py_buffer";
222
+ size_ = py_buf_.len;
223
+ capacity_ = py_buf_.len;
224
+ is_mutable_ = !py_buf_.readonly;
225
+ return Status::OK();
226
+ } else {
227
+ return ConvertPyError(StatusCode::Invalid);
228
+ }
229
+ }
230
+
231
+ Result<std::shared_ptr<Buffer>> PyBuffer::FromPyObject(PyObject* obj) {
232
+ PyBuffer* buf = new PyBuffer();
233
+ std::shared_ptr<Buffer> res(buf);
234
+ RETURN_NOT_OK(buf->Init(obj));
235
+ return res;
236
+ }
237
+
238
+ PyBuffer::~PyBuffer() {
239
+ if (data_ != nullptr) {
240
+ PyAcquireGIL lock;
241
+ PyBuffer_Release(&py_buf_);
242
+ }
243
+ }
244
+
245
+ } // namespace py
246
+ } // namespace arrow
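Editorial note (not part of the commit): a sketch of the exception-to-Status round trip implemented above. CallPythonNoArgs is a hypothetical helper, and ConvertPyError is assumed to default to StatusCode::UnknownError as declared in common.h.

#include "arrow/python/common.h"

// Hypothetical usage: run a Python callable and surface a raised exception
// as an arrow::Status carrying a PythonErrorDetail.
arrow::Status CallPythonNoArgs(PyObject* callable) {
  return arrow::py::SafeCallIntoPython([&]() -> arrow::Status {
    PyObject* result = PyObject_CallObject(callable, /*args=*/nullptr);
    if (result == nullptr) {
      return arrow::py::ConvertPyError();  // captures type/value/traceback
    }
    Py_DECREF(result);
    return arrow::Status::OK();
  });
}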
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.cc ADDED
@@ -0,0 +1,62 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "csv.h"
19
+
20
+ #include <memory>
21
+
22
+ #include "arrow/python/common.h"
23
+
24
+ namespace arrow {
25
+
26
+ using csv::InvalidRow;
27
+ using csv::InvalidRowHandler;
28
+ using csv::InvalidRowResult;
29
+
30
+ namespace py {
31
+ namespace csv {
32
+
33
+ InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback cb, PyObject* py_handler) {
34
+ if (cb == nullptr) {
35
+ return InvalidRowHandler{};
36
+ }
37
+
38
+ struct Handler {
39
+ PyInvalidRowCallback cb;
40
+ std::shared_ptr<OwnedRefNoGIL> handler_ref;
41
+
42
+ InvalidRowResult operator()(const InvalidRow& invalid_row) {
43
+ InvalidRowResult result;
44
+ auto st = SafeCallIntoPython([&]() -> Status {
45
+ result = cb(handler_ref->obj(), invalid_row);
46
+ if (PyErr_Occurred()) {
47
+ PyErr_WriteUnraisable(handler_ref->obj());
48
+ }
49
+ return Status::OK();
50
+ });
51
+ ARROW_UNUSED(st);
52
+ return result;
53
+ }
54
+ };
55
+
56
+ Py_INCREF(py_handler);
57
+ return Handler{cb, std::make_shared<OwnedRefNoGIL>(py_handler)};
58
+ }
59
+
60
+ } // namespace csv
61
+ } // namespace py
62
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.h ADDED
@@ -0,0 +1,42 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/csv/options.h"
26
+ #include "arrow/python/common.h"
27
+ #include "arrow/util/macros.h"
28
+
29
+ namespace arrow {
30
+ namespace py {
31
+ namespace csv {
32
+
33
+ using PyInvalidRowCallback = std::function<::arrow::csv::InvalidRowResult(
34
+ PyObject*, const ::arrow::csv::InvalidRow&)>;
35
+
36
+ ARROW_PYTHON_EXPORT
37
+ ::arrow::csv::InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback,
38
+ PyObject* handler);
39
+
40
+ } // namespace csv
41
+ } // namespace py
42
+ } // namespace arrow
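Editorial note (not part of the commit): a sketch of building an invalid-row handler with the factory declared above. MakeSkippingParseOptions is a hypothetical helper; the callback simply skips malformed rows without consulting the Python handler object.

#include "arrow/csv/options.h"
#include "arrow/python/csv.h"

// Hypothetical usage: skip malformed rows; py_handler is kept alive by the handler.
arrow::csv::ParseOptions MakeSkippingParseOptions(PyObject* py_handler) {
  arrow::py::csv::PyInvalidRowCallback cb =
      [](PyObject* /*handler*/, const arrow::csv::InvalidRow& /*row*/) {
        return arrow::csv::InvalidRowResult::Skip;
      };
  auto parse_options = arrow::csv::ParseOptions::Defaults();
  parse_options.invalid_row_handler =
      arrow::py::csv::MakeInvalidRowHandler(cb, py_handler);
  return parse_options;
}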
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.cc ADDED
@@ -0,0 +1,663 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+ #include "datetime.h"
18
+
19
+ #include <algorithm>
20
+ #include <chrono>
21
+ #include <iomanip>
22
+ #include <regex>
23
+ #include <string_view>
24
+
25
+ #include "arrow/array.h"
26
+ #include "arrow/python/arrow_to_python_internal.h"
27
+ #include "arrow/python/common.h"
28
+ #include "arrow/python/helpers.h"
29
+ #include "arrow/python/platform.h"
30
+ #include "arrow/scalar.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/type.h"
33
+ #include "arrow/util/logging.h"
34
+ #include "arrow/util/regex.h"
35
+ #include "arrow/util/value_parsing.h"
36
+
37
+ namespace arrow {
38
+
39
+ using internal::RegexMatch;
40
+
41
+ namespace py {
42
+ namespace internal {
43
+
44
+ namespace {
45
+
46
+ bool MatchFixedOffset(const std::string& tz, std::string_view* sign,
47
+ std::string_view* hour, std::string_view* minute) {
48
+ static const std::regex regex("^([+-])(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])$");
49
+ if (tz.size() < 5) {
50
+ return false;
51
+ }
52
+ return RegexMatch(regex, tz, {sign, hour, minute});
53
+ }
54
+
55
+ constexpr char* NonConst(const char* st) {
56
+ // Hack for Python versions < 3.7, where PyStructSequence_Field members
57
+ // were non-const (C++ doesn't like assigning string literals to these types)
58
+ return const_cast<char*>(st);
59
+ }
60
+
61
+ static PyTypeObject MonthDayNanoTupleType = {};
62
+
63
+ static PyStructSequence_Field MonthDayNanoField[] = {
64
+ {NonConst("months"), NonConst("The number of months in the interval")},
65
+ {NonConst("days"), NonConst("The number of days in the interval")},
66
+ {NonConst("nanoseconds"), NonConst("The number of nanoseconds in the interval")},
67
+ {nullptr, nullptr}};
68
+
69
+ static PyStructSequence_Desc MonthDayNanoTupleDesc = {
70
+ NonConst("MonthDayNano"),
71
+ NonConst("A calendar interval consisting of months, days and nanoseconds."),
72
+ MonthDayNanoField,
73
+ /*n_in_sequence=*/3};
74
+
75
+ } // namespace
76
+
77
+ #ifndef PYPY_VERSION
78
+ PyDateTime_CAPI* datetime_api = nullptr;
79
+
80
+ void InitDatetime() {
81
+ PyAcquireGIL lock;
82
+ datetime_api =
83
+ reinterpret_cast<PyDateTime_CAPI*>(PyCapsule_Import(PyDateTime_CAPSULE_NAME, 0));
84
+ if (datetime_api == nullptr) {
85
+ Py_FatalError("Could not import datetime C API");
86
+ }
87
+ }
88
+ #endif
89
+
90
+ // The following code is adapted from
91
+ // https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/datetime.c
92
+
93
+ // Days per month, regular year and leap year
94
+ static int64_t _days_per_month_table[2][12] = {
95
+ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
96
+ {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};
97
+
98
+ static bool is_leapyear(int64_t year) {
99
+ return (year & 0x3) == 0 && // year % 4 == 0
100
+ ((year % 100) != 0 || (year % 400) == 0);
101
+ }
102
+
103
+ // Calculates the days offset from the 1970 epoch.
104
+ static int64_t get_days_from_date(int64_t date_year, int64_t date_month,
105
+ int64_t date_day) {
106
+ int64_t i, month;
107
+ int64_t year, days = 0;
108
+ int64_t* month_lengths;
109
+
110
+ year = date_year - 1970;
111
+ days = year * 365;
112
+
113
+ // Adjust for leap years
114
+ if (days >= 0) {
115
+ // 1968 is the closest leap year before 1970.
116
+ // Exclude the current year, so add 1.
117
+ year += 1;
118
+ // Add one day for each 4 years
119
+ days += year / 4;
120
+ // 1900 is the closest previous year divisible by 100
121
+ year += 68;
122
+ // Subtract one day for each 100 years
123
+ days -= year / 100;
124
+ // 1600 is the closest previous year divisible by 400
125
+ year += 300;
126
+ // Add one day for each 400 years
127
+ days += year / 400;
128
+ } else {
129
+ // 1972 is the closest later year after 1970.
130
+ // Include the current year, so subtract 2.
131
+ year -= 2;
132
+ // Subtract one day for each 4 years
133
+ days += year / 4;
134
+ // 2000 is the closest later year divisible by 100
135
+ year -= 28;
136
+ // Add one day for each 100 years
137
+ days -= year / 100;
138
+ // 2000 is also the closest later year divisible by 400
139
+ // Subtract one day for each 400 years
140
+ days += year / 400;
141
+ }
142
+
143
+ month_lengths = _days_per_month_table[is_leapyear(date_year)];
144
+ month = date_month - 1;
145
+
146
+ // Add the months
147
+ for (i = 0; i < month; ++i) {
148
+ days += month_lengths[i];
149
+ }
150
+
151
+ // Add the days
152
+ days += date_day - 1;
153
+
154
+ return days;
155
+ }
156
+
157
+ // Modifies '*days_' to be the day offset within the year,
158
+ // and returns the year.
159
+ static int64_t days_to_yearsdays(int64_t* days_) {
160
+ const int64_t days_per_400years = (400 * 365 + 100 - 4 + 1);
161
+ // Adjust so it's relative to the year 2000 (divisible by 400)
162
+ int64_t days = (*days_) - (365 * 30 + 7);
163
+ int64_t year;
164
+
165
+ // Break down the 400 year cycle to get the year and day within the year
166
+ if (days >= 0) {
167
+ year = 400 * (days / days_per_400years);
168
+ days = days % days_per_400years;
169
+ } else {
170
+ year = 400 * ((days - (days_per_400years - 1)) / days_per_400years);
171
+ days = days % days_per_400years;
172
+ if (days < 0) {
173
+ days += days_per_400years;
174
+ }
175
+ }
176
+
177
+ // Work out the year/day within the 400 year cycle
178
+ if (days >= 366) {
179
+ year += 100 * ((days - 1) / (100 * 365 + 25 - 1));
180
+ days = (days - 1) % (100 * 365 + 25 - 1);
181
+ if (days >= 365) {
182
+ year += 4 * ((days + 1) / (4 * 365 + 1));
183
+ days = (days + 1) % (4 * 365 + 1);
184
+ if (days >= 366) {
185
+ year += (days - 1) / 365;
186
+ days = (days - 1) % 365;
187
+ }
188
+ }
189
+ }
190
+
191
+ *days_ = days;
192
+ return year + 2000;
193
+ }
194
+
195
+ // Extracts the month and year and day number from a number of days
196
+ static void get_date_from_days(int64_t days, int64_t* date_year, int64_t* date_month,
197
+ int64_t* date_day) {
198
+ int64_t *month_lengths, i;
199
+
200
+ *date_year = days_to_yearsdays(&days);
201
+ month_lengths = _days_per_month_table[is_leapyear(*date_year)];
202
+
203
+ for (i = 0; i < 12; ++i) {
204
+ if (days < month_lengths[i]) {
205
+ *date_month = i + 1;
206
+ *date_day = days + 1;
207
+ return;
208
+ } else {
209
+ days -= month_lengths[i];
210
+ }
211
+ }
212
+
213
+ // Should never get here
214
+ return;
215
+ }
216
+
217
+ // Splitting time quantities, for example splitting total seconds into
218
+ // minutes and remaining seconds. After we run
219
+ // int64_t remaining = split_time(total, quotient, &next)
220
+ // we have
221
+ // total = next * quotient + remaining. Handles negative values by propagating
222
+ // them: If total is negative, next will be negative and remaining will
223
+ // always be non-negative.
224
+ static inline int64_t split_time(int64_t total, int64_t quotient, int64_t* next) {
225
+ int64_t r = total % quotient;
226
+ if (r < 0) {
227
+ *next = total / quotient - 1;
228
+ return r + quotient;
229
+ } else {
230
+ *next = total / quotient;
231
+ return r;
232
+ }
233
+ }
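// Editorial aside (not part of the diff): a quick sanity check of the split.
// split_time(-61, 60, &next) sees r = -61 % 60 = -1 < 0, so it sets
// next = -61 / 60 - 1 = -2 and returns -1 + 60 = 59; the promised identity
// -61 == (-2) * 60 + 59 holds with a non-negative remainder.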
234
+
235
+ static inline Status PyTime_convert_int(int64_t val, const TimeUnit::type unit,
236
+ int64_t* hour, int64_t* minute, int64_t* second,
237
+ int64_t* microsecond) {
238
+ switch (unit) {
239
+ case TimeUnit::NANO:
240
+ if (val % 1000 != 0) {
241
+ return Status::Invalid("Value ", val, " has non-zero nanoseconds");
242
+ }
243
+ val /= 1000;
244
+ // fall through
245
+ case TimeUnit::MICRO:
246
+ *microsecond = split_time(val, 1000000LL, &val);
247
+ *second = split_time(val, 60, &val);
248
+ *minute = split_time(val, 60, hour);
249
+ break;
250
+ case TimeUnit::MILLI:
251
+ *microsecond = split_time(val, 1000, &val) * 1000;
252
+ // fall through
253
+ case TimeUnit::SECOND:
254
+ *second = split_time(val, 60, &val);
255
+ *minute = split_time(val, 60, hour);
256
+ break;
257
+ default:
258
+ break;
259
+ }
260
+ return Status::OK();
261
+ }
262
+
263
+ static inline Status PyDate_convert_int(int64_t val, const DateUnit unit, int64_t* year,
264
+ int64_t* month, int64_t* day) {
265
+ switch (unit) {
266
+ case DateUnit::MILLI:
267
+ val /= 86400000LL; // fall through
268
+ case DateUnit::DAY:
269
+ get_date_from_days(val, year, month, day);
270
+ default:
271
+ break;
272
+ }
273
+ return Status::OK();
274
+ }
275
+
276
+ PyObject* NewMonthDayNanoTupleType() {
277
+ if (MonthDayNanoTupleType.tp_name == nullptr) {
278
+ if (PyStructSequence_InitType2(&MonthDayNanoTupleType, &MonthDayNanoTupleDesc) != 0) {
279
+ Py_FatalError("Could not initialize MonthDayNanoTuple");
280
+ }
281
+ }
282
+ Py_INCREF(&MonthDayNanoTupleType);
283
+ return (PyObject*)&MonthDayNanoTupleType;
284
+ }
285
+
286
+ Status PyTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out) {
287
+ int64_t hour = 0, minute = 0, second = 0, microsecond = 0;
288
+ RETURN_NOT_OK(PyTime_convert_int(val, unit, &hour, &minute, &second, &microsecond));
289
+ *out = PyTime_FromTime(static_cast<int32_t>(hour), static_cast<int32_t>(minute),
290
+ static_cast<int32_t>(second), static_cast<int32_t>(microsecond));
291
+ return Status::OK();
292
+ }
293
+
294
+ Status PyDate_from_int(int64_t val, const DateUnit unit, PyObject** out) {
295
+ int64_t year = 0, month = 0, day = 0;
296
+ RETURN_NOT_OK(PyDate_convert_int(val, unit, &year, &month, &day));
297
+ *out = PyDate_FromDate(static_cast<int32_t>(year), static_cast<int32_t>(month),
298
+ static_cast<int32_t>(day));
299
+ return Status::OK();
300
+ }
301
+
302
+ Status PyDateTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out) {
303
+ int64_t hour = 0, minute = 0, second = 0, microsecond = 0;
304
+ RETURN_NOT_OK(PyTime_convert_int(val, unit, &hour, &minute, &second, &microsecond));
305
+ int64_t total_days = 0;
306
+ hour = split_time(hour, 24, &total_days);
307
+ int64_t year = 0, month = 0, day = 0;
308
+ get_date_from_days(total_days, &year, &month, &day);
309
+ *out = PyDateTime_FromDateAndTime(
310
+ static_cast<int32_t>(year), static_cast<int32_t>(month), static_cast<int32_t>(day),
311
+ static_cast<int32_t>(hour), static_cast<int32_t>(minute),
312
+ static_cast<int32_t>(second), static_cast<int32_t>(microsecond));
313
+ return Status::OK();
314
+ }
315
+
316
+ int64_t PyDate_to_days(PyDateTime_Date* pydate) {
317
+ return get_days_from_date(PyDateTime_GET_YEAR(pydate), PyDateTime_GET_MONTH(pydate),
318
+ PyDateTime_GET_DAY(pydate));
319
+ }
320
+
321
+ Result<int64_t> PyDateTime_utcoffset_s(PyObject* obj) {
322
+ // calculate offset from UTC timezone in seconds
323
+ // supports only PyDateTime_DateTime and PyDateTime_Time objects
324
+ OwnedRef pyoffset(PyObject_CallMethod(obj, "utcoffset", NULL));
325
+ RETURN_IF_PYERROR();
326
+ if (pyoffset.obj() != nullptr && pyoffset.obj() != Py_None) {
327
+ auto delta = reinterpret_cast<PyDateTime_Delta*>(pyoffset.obj());
328
+ return internal::PyDelta_to_s(delta);
329
+ } else {
330
+ return 0;
331
+ }
332
+ }
333
+
334
+ Result<std::string> PyTZInfo_utcoffset_hhmm(PyObject* pytzinfo) {
335
+ // attempt to convert timezone offset objects to "+/-{hh}:{mm}" format
336
+ OwnedRef pydelta_object(PyObject_CallMethod(pytzinfo, "utcoffset", "O", Py_None));
337
+ RETURN_IF_PYERROR();
338
+
339
+ if (!PyDelta_Check(pydelta_object.obj())) {
340
+ return Status::Invalid(
341
+ "Object returned by tzinfo.utcoffset(None) is not an instance of "
342
+ "datetime.timedelta");
343
+ }
344
+ auto pydelta = reinterpret_cast<PyDateTime_Delta*>(pydelta_object.obj());
345
+
346
+ // retrieve the offset as seconds
347
+ auto total_seconds = internal::PyDelta_to_s(pydelta);
348
+
349
+ // determine whether the offset is positive or negative
350
+ auto sign = (total_seconds < 0) ? "-" : "+";
351
+ total_seconds = abs(total_seconds);
352
+
353
+ // calculate offset components
354
+ int64_t hours, minutes, seconds;
355
+ seconds = split_time(total_seconds, 60, &minutes);
356
+ minutes = split_time(minutes, 60, &hours);
357
+ if (seconds > 0) {
358
+ // check there are no remaining seconds
359
+ return Status::Invalid("Offset must represent whole number of minutes");
360
+ }
361
+
362
+ // construct the timezone string
363
+ std::stringstream stream;
364
+ stream << sign << std::setfill('0') << std::setw(2) << hours << ":" << std::setfill('0')
365
+ << std::setw(2) << minutes;
366
+ return stream.str();
367
+ }
368
+
369
+ // Converted from python. See https://github.com/apache/arrow/pull/7604
370
+ // for details.
371
+ Result<PyObject*> StringToTzinfo(const std::string& tz) {
372
+ std::string_view sign_str, hour_str, minute_str;
373
+ OwnedRef pytz;
374
+ OwnedRef zoneinfo;
375
+ OwnedRef datetime;
376
+
377
+ if (internal::ImportModule("pytz", &pytz).ok()) {
378
+ if (MatchFixedOffset(tz, &sign_str, &hour_str, &minute_str)) {
379
+ int sign = -1;
380
+ if (sign_str == "+") {
381
+ sign = 1;
382
+ }
383
+ OwnedRef fixed_offset;
384
+ RETURN_NOT_OK(internal::ImportFromModule(pytz.obj(), "FixedOffset", &fixed_offset));
385
+ uint32_t minutes, hours;
386
+ if (!::arrow::internal::ParseUnsigned(hour_str.data(), hour_str.size(), &hours) ||
387
+ !::arrow::internal::ParseUnsigned(minute_str.data(), minute_str.size(),
388
+ &minutes)) {
389
+ return Status::Invalid("Invalid timezone: ", tz);
390
+ }
391
+ OwnedRef total_minutes(PyLong_FromLong(
392
+ sign * ((static_cast<int>(hours) * 60) + static_cast<int>(minutes))));
393
+ RETURN_IF_PYERROR();
394
+ auto tzinfo =
395
+ PyObject_CallFunctionObjArgs(fixed_offset.obj(), total_minutes.obj(), NULL);
396
+ RETURN_IF_PYERROR();
397
+ return tzinfo;
398
+ }
399
+
400
+ OwnedRef timezone;
401
+ RETURN_NOT_OK(internal::ImportFromModule(pytz.obj(), "timezone", &timezone));
402
+ OwnedRef py_tz_string(
403
+ PyUnicode_FromStringAndSize(tz.c_str(), static_cast<Py_ssize_t>(tz.size())));
404
+ auto tzinfo = PyObject_CallFunctionObjArgs(timezone.obj(), py_tz_string.obj(), NULL);
405
+ RETURN_IF_PYERROR();
406
+ return tzinfo;
407
+ }
408
+
409
+ // catch fixed offset if pytz is not present
410
+ if (MatchFixedOffset(tz, &sign_str, &hour_str, &minute_str)) {
411
+ RETURN_NOT_OK(internal::ImportModule("datetime", &datetime));
412
+ int sign = -1;
413
+ if (sign_str == "+") {
414
+ sign = 1;
415
+ }
416
+
417
+ // import timezone and timedelta module to create a tzinfo object
418
+ OwnedRef class_timezone;
419
+ OwnedRef class_timedelta;
420
+ RETURN_NOT_OK(
421
+ internal::ImportFromModule(datetime.obj(), "timezone", &class_timezone));
422
+ RETURN_NOT_OK(
423
+ internal::ImportFromModule(datetime.obj(), "timedelta", &class_timedelta));
424
+
425
+ // check input
426
+ uint32_t minutes, hours;
427
+ if (!::arrow::internal::ParseUnsigned(hour_str.data(), hour_str.size(), &hours) ||
428
+ !::arrow::internal::ParseUnsigned(minute_str.data(), minute_str.size(),
429
+ &minutes)) {
430
+ return Status::Invalid("Invalid timezone: ", tz);
431
+ }
432
+
433
+ // save offset as a signed integer
434
+ OwnedRef total_minutes(PyLong_FromLong(
435
+ sign * ((static_cast<int>(hours) * 60) + static_cast<int>(minutes))));
436
+ // create zero integers for empty arguments in datetime.timedelta
437
+ OwnedRef zero(PyLong_FromLong(static_cast<int>(0)));
438
+
439
+ // call datetime.timedelta to get correct offset object for datetime.timezone
440
+ auto offset =
441
+ PyObject_CallFunctionObjArgs(class_timedelta.obj(), zero.obj(), zero.obj(),
442
+ zero.obj(), zero.obj(), total_minutes.obj(), NULL);
443
+ RETURN_IF_PYERROR();
444
+ // call datetime.timezone
445
+ auto tzinfo = PyObject_CallFunctionObjArgs(class_timezone.obj(), offset, NULL);
446
+ RETURN_IF_PYERROR();
447
+ return tzinfo;
448
+ }
449
+
450
+ // fallback on zoneinfo if tz is string and pytz is not present
451
+ if (internal::ImportModule("zoneinfo", &zoneinfo).ok()) {
452
+ OwnedRef class_zoneinfo;
453
+ RETURN_NOT_OK(
454
+ internal::ImportFromModule(zoneinfo.obj(), "ZoneInfo", &class_zoneinfo));
455
+ OwnedRef py_tz_string(
456
+ PyUnicode_FromStringAndSize(tz.c_str(), static_cast<Py_ssize_t>(tz.size())));
457
+ auto tzinfo =
458
+ PyObject_CallFunctionObjArgs(class_zoneinfo.obj(), py_tz_string.obj(), NULL);
459
+ RETURN_IF_PYERROR();
460
+ return tzinfo;
461
+ }
462
+
463
+ return Status::Invalid(
464
+ "Pytz package or Python>=3.8 for zoneinfo module must be installed.");
465
+ }
466
+
467
+ Result<std::string> TzinfoToString(PyObject* tzinfo) {
468
+ OwnedRef module_pytz; // import pytz
469
+ OwnedRef module_datetime; // import datetime
470
+ OwnedRef module_zoneinfo; // import zoneinfo
471
+ OwnedRef module_dateutil; // import dateutil
472
+ OwnedRef class_timezone; // from datetime import timezone
473
+ OwnedRef class_fixedoffset; // from pytz import _FixedOffset
474
+ OwnedRef class_basetzinfo; // from pytz import BaseTzInfo
475
+ OwnedRef class_zoneinfo; // from zoneinfo import ZoneInfo
476
+ OwnedRef class_tzfile; // from zoneinfo import tzfile
477
+
478
+ // import necessary modules
479
+ RETURN_NOT_OK(internal::ImportModule("datetime", &module_datetime));
480
+ // import necessary classes
481
+ RETURN_NOT_OK(
482
+ internal::ImportFromModule(module_datetime.obj(), "timezone", &class_timezone));
483
+
484
+ // check that it's a valid tzinfo object
485
+ if (!PyTZInfo_Check(tzinfo)) {
486
+ return Status::TypeError("Not an instance of datetime.tzinfo");
487
+ }
488
+
489
+ // if tzinfo is an instance of datetime.timezone return the
490
+ // HH:MM offset string representation
491
+ if (PyObject_IsInstance(tzinfo, class_timezone.obj())) {
492
+ // still recognize datetime.timezone.utc as UTC (instead of +00:00)
493
+ OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
494
+ RETURN_IF_PYERROR();
495
+ if (PyUnicode_Check(tzname_object.obj())) {
496
+ std::string result;
497
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(tzname_object.obj(), &result));
498
+ if (result == "UTC") {
499
+ return result;
500
+ }
501
+ }
502
+ return PyTZInfo_utcoffset_hhmm(tzinfo);
503
+ }
504
+
505
+ // Try to import pytz if it is available
506
+ if (internal::ImportModule("pytz", &module_pytz).ok()) {
507
+ RETURN_NOT_OK(internal::ImportFromModule(module_pytz.obj(), "_FixedOffset",
508
+ &class_fixedoffset));
509
+ RETURN_NOT_OK(
510
+ internal::ImportFromModule(module_pytz.obj(), "BaseTzInfo", &class_basetzinfo));
511
+ }
512
+
513
+ // if tzinfo is an instance of pytz._FixedOffset return the
514
+ // HH:MM offset string representation
515
+ if (module_pytz.obj() != nullptr &&
516
+ PyObject_IsInstance(tzinfo, class_fixedoffset.obj())) {
517
+ OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
518
+ RETURN_IF_PYERROR();
519
+ return PyTZInfo_utcoffset_hhmm(tzinfo);
520
+ }
521
+
522
+ // if pytz is installed and tzinfo is an instance of pytz.BaseTzInfo
523
+ if (module_pytz.obj() != nullptr &&
524
+ PyObject_IsInstance(tzinfo, class_basetzinfo.obj())) {
525
+ OwnedRef zone(PyObject_GetAttrString(tzinfo, "zone"));
526
+ RETURN_IF_PYERROR();
527
+ std::string result;
528
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(zone.obj(), &result));
529
+ return result;
530
+ }
531
+
532
+ // Try to import zoneinfo if it is available
533
+ if (internal::ImportModule("zoneinfo", &module_zoneinfo).ok()) {
534
+ RETURN_NOT_OK(
535
+ internal::ImportFromModule(module_zoneinfo.obj(), "ZoneInfo", &class_zoneinfo));
536
+ }
537
+
538
+ // if zoneinfo is installed and tzinfo is an instance of zoneinfo.ZoneInfo
539
+ if (module_zoneinfo.obj() != nullptr &&
540
+ PyObject_IsInstance(tzinfo, class_zoneinfo.obj())) {
541
+ OwnedRef key(PyObject_GetAttrString(tzinfo, "key"));
542
+ RETURN_IF_PYERROR();
543
+ std::string result;
544
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(key.obj(), &result));
545
+ return result;
546
+ }
547
+
548
+ // Try to import dateutil if it is available
549
+ if (internal::ImportModule("dateutil.tz", &module_dateutil).ok()) {
550
+ RETURN_NOT_OK(
551
+ internal::ImportFromModule(module_dateutil.obj(), "tzfile", &class_tzfile));
552
+ }
553
+
554
+ // if dateutil is installed and tzinfo is an instance of dateutil.tz.tzfile
555
+ if (module_dateutil.obj() != nullptr &&
556
+ PyObject_IsInstance(tzinfo, class_tzfile.obj())) {
557
+ OwnedRef _filename(PyObject_GetAttrString(tzinfo, "_filename"));
558
+ RETURN_IF_PYERROR();
559
+ std::string result;
560
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(_filename.obj(), &result));
561
+ // _filename returns a full path in general ('/usr/share/zoneinfo/Europe/Paris')
562
+ // or a POSIX name on Windows ('Europe/Paris') - we need a substring in the first case
563
+ std::size_t pos = result.find("zoneinfo/");
564
+ if (pos != std::string::npos) {
565
+ return result.substr(pos + 9);
566
+ }
567
+ return result;
568
+ }
569
+
570
+ // attempt to call tzinfo.tzname(None)
571
+ OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
572
+ RETURN_IF_PYERROR();
573
+ if (PyUnicode_Check(tzname_object.obj())) {
574
+ std::string result;
575
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(tzname_object.obj(), &result));
576
+ return result;
577
+ }
578
+
579
+ // fall back to HH:MM offset string representation based on tzinfo.utcoffset(None)
580
+ return PyTZInfo_utcoffset_hhmm(tzinfo);
581
+ }
582
+
583
+ PyObject* MonthDayNanoIntervalToNamedTuple(
584
+ const MonthDayNanoIntervalType::MonthDayNanos& interval) {
585
+ OwnedRef tuple(PyStructSequence_New(&MonthDayNanoTupleType));
586
+ if (ARROW_PREDICT_FALSE(tuple.obj() == nullptr)) {
587
+ return nullptr;
588
+ }
589
+ PyStructSequence_SetItem(tuple.obj(), /*pos=*/0, PyLong_FromLong(interval.months));
590
+ PyStructSequence_SetItem(tuple.obj(), /*pos=*/1, PyLong_FromLong(interval.days));
591
+ PyStructSequence_SetItem(tuple.obj(), /*pos=*/2,
592
+ PyLong_FromLongLong(interval.nanoseconds));
593
+ return tuple.detach();
594
+ }
595
+
596
+ namespace {
597
+
598
+ // Wrapper around a Python list object that mimics dereference and assignment
599
+ // operations.
600
+ struct PyListAssigner {
601
+ public:
602
+ explicit PyListAssigner(PyObject* list) : list_(list) { DCHECK(PyList_Check(list_)); }
603
+
604
+ PyListAssigner& operator*() { return *this; }
605
+
606
+ void operator=(PyObject* obj) {
607
+ if (ARROW_PREDICT_FALSE(PyList_SetItem(list_, current_index_, obj) == -1)) {
608
+ Py_FatalError("list did not have the correct preallocated size.");
609
+ }
610
+ }
611
+
612
+ PyListAssigner& operator++() {
613
+ current_index_++;
614
+ return *this;
615
+ }
616
+
617
+ PyListAssigner& operator+=(int64_t offset) {
618
+ current_index_ += offset;
619
+ return *this;
620
+ }
621
+
622
+ private:
623
+ PyObject* list_;
624
+ int64_t current_index_ = 0;
625
+ };
626
+
627
+ } // namespace
628
+
629
+ Result<PyObject*> MonthDayNanoIntervalArrayToPyList(
630
+ const MonthDayNanoIntervalArray& array) {
631
+ OwnedRef out_list(PyList_New(array.length()));
632
+ RETURN_IF_PYERROR();
633
+ PyListAssigner out_objects(out_list.obj());
634
+ auto& interval_array =
635
+ arrow::internal::checked_cast<const MonthDayNanoIntervalArray&>(array);
636
+ RETURN_NOT_OK(internal::WriteArrayObjects(
637
+ interval_array,
638
+ [&](const MonthDayNanoIntervalType::MonthDayNanos& interval, PyListAssigner& out) {
639
+ PyObject* tuple = internal::MonthDayNanoIntervalToNamedTuple(interval);
640
+ if (ARROW_PREDICT_FALSE(tuple == nullptr)) {
641
+ RETURN_IF_PYERROR();
642
+ }
643
+
644
+ *out = tuple;
645
+ return Status::OK();
646
+ },
647
+ out_objects));
648
+ return out_list.detach();
649
+ }
650
+
651
+ Result<PyObject*> MonthDayNanoIntervalScalarToPyObject(
652
+ const MonthDayNanoIntervalScalar& scalar) {
653
+ if (scalar.is_valid) {
654
+ return internal::MonthDayNanoIntervalToNamedTuple(scalar.value);
655
+ } else {
656
+ Py_INCREF(Py_None);
657
+ return Py_None;
658
+ }
659
+ }
660
+
661
+ } // namespace internal
662
+ } // namespace py
663
+ } // namespace arrow
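As a minimal usage sketch of the conversions above (hypothetical caller inside a Status-returning function, GIL held; the 18993-day value is just an example date):

PyObject* py_date = nullptr;
// 18993 days after the 1970 epoch is 2022-01-01.
ARROW_RETURN_NOT_OK(arrow::py::internal::PyDate_from_int(
    /*val=*/18993, arrow::DateUnit::DAY, &py_date));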
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.h ADDED
@@ -0,0 +1,231 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <chrono>
22
+
23
+ #include "arrow/python/platform.h"
24
+ #include "arrow/python/visibility.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/type.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/int_util_overflow.h"
30
+ #include "arrow/util/logging.h"
31
+
32
+ // By default, PyDateTimeAPI is a *static* variable. This forces
33
+ // PyDateTime_IMPORT to be called in every C/C++ module using the
34
+ // C datetime API. This is error-prone and potentially costly.
35
+ // Instead, we redefine PyDateTimeAPI to point to a global variable,
36
+ // which is initialized once by calling InitDatetime().
37
+ #ifdef PYPY_VERSION
38
+ #include "datetime.h"
39
+ #else
40
+ #define PyDateTimeAPI ::arrow::py::internal::datetime_api
41
+ #endif
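// Editorial aside (not part of the diff): a consumer of this header is expected
// to call arrow::py::internal::InitDatetime() exactly once, with the GIL held,
// before using any CPython macro that routes through PyDateTimeAPI, e.g.:
//   arrow::py::internal::InitDatetime();         // one-time setup
//   PyObject* d = PyDate_FromDate(2024, 1, 31);  // now resolves via datetime_api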
42
+
43
+ namespace arrow {
44
+ using internal::AddWithOverflow;
45
+ using internal::MultiplyWithOverflow;
46
+ namespace py {
47
+ namespace internal {
48
+
49
+ #ifndef PYPY_VERSION
50
+ extern PyDateTime_CAPI* datetime_api;
51
+
52
+ ARROW_PYTHON_EXPORT
53
+ void InitDatetime();
54
+ #endif
55
+
56
+ // Returns the MonthDayNano namedtuple type (increments the reference count).
57
+ ARROW_PYTHON_EXPORT
58
+ PyObject* NewMonthDayNanoTupleType();
59
+
60
+ ARROW_PYTHON_EXPORT
61
+ inline int64_t PyTime_to_us(PyObject* pytime) {
62
+ return (PyDateTime_TIME_GET_HOUR(pytime) * 3600000000LL +
63
+ PyDateTime_TIME_GET_MINUTE(pytime) * 60000000LL +
64
+ PyDateTime_TIME_GET_SECOND(pytime) * 1000000LL +
65
+ PyDateTime_TIME_GET_MICROSECOND(pytime));
66
+ }
67
+
68
+ ARROW_PYTHON_EXPORT
69
+ inline int64_t PyTime_to_s(PyObject* pytime) { return PyTime_to_us(pytime) / 1000000; }
70
+
71
+ ARROW_PYTHON_EXPORT
72
+ inline int64_t PyTime_to_ms(PyObject* pytime) { return PyTime_to_us(pytime) / 1000; }
73
+
74
+ ARROW_PYTHON_EXPORT
75
+ inline int64_t PyTime_to_ns(PyObject* pytime) { return PyTime_to_us(pytime) * 1000; }
76
+
77
+ ARROW_PYTHON_EXPORT
78
+ Status PyTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);
79
+
80
+ ARROW_PYTHON_EXPORT
81
+ Status PyDate_from_int(int64_t val, const DateUnit unit, PyObject** out);
82
+
83
+ // WARNING: This function returns a naive datetime.
84
+ ARROW_PYTHON_EXPORT
85
+ Status PyDateTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);
86
+
87
+ // This declaration must be the same as in filesystem/filesystem.h
88
+ using TimePoint =
89
+ std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;
90
+
91
+ ARROW_PYTHON_EXPORT
92
+ int64_t PyDate_to_days(PyDateTime_Date* pydate);
93
+
94
+ ARROW_PYTHON_EXPORT
95
+ inline int64_t PyDate_to_s(PyDateTime_Date* pydate) {
96
+ return PyDate_to_days(pydate) * 86400LL;
97
+ }
98
+
99
+ ARROW_PYTHON_EXPORT
100
+ inline int64_t PyDate_to_ms(PyDateTime_Date* pydate) {
101
+ return PyDate_to_days(pydate) * 86400000LL;
102
+ }
103
+
104
+ ARROW_PYTHON_EXPORT
105
+ inline int64_t PyDateTime_to_s(PyDateTime_DateTime* pydatetime) {
106
+ return (PyDate_to_s(reinterpret_cast<PyDateTime_Date*>(pydatetime)) +
107
+ PyDateTime_DATE_GET_HOUR(pydatetime) * 3600LL +
108
+ PyDateTime_DATE_GET_MINUTE(pydatetime) * 60LL +
109
+ PyDateTime_DATE_GET_SECOND(pydatetime));
110
+ }
111
+
112
+ ARROW_PYTHON_EXPORT
113
+ inline int64_t PyDateTime_to_ms(PyDateTime_DateTime* pydatetime) {
114
+ return (PyDateTime_to_s(pydatetime) * 1000LL +
115
+ PyDateTime_DATE_GET_MICROSECOND(pydatetime) / 1000);
116
+ }
117
+
118
+ ARROW_PYTHON_EXPORT
119
+ inline int64_t PyDateTime_to_us(PyDateTime_DateTime* pydatetime) {
120
+ return (PyDateTime_to_s(pydatetime) * 1000000LL +
121
+ PyDateTime_DATE_GET_MICROSECOND(pydatetime));
122
+ }
123
+
124
+ ARROW_PYTHON_EXPORT
125
+ inline int64_t PyDateTime_to_ns(PyDateTime_DateTime* pydatetime) {
126
+ return PyDateTime_to_us(pydatetime) * 1000LL;
127
+ }
128
+
129
+ ARROW_PYTHON_EXPORT
130
+ inline TimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime) {
131
+ return TimePoint(TimePoint::duration(PyDateTime_to_ns(pydatetime)));
132
+ }
133
+
134
+ ARROW_PYTHON_EXPORT
135
+ inline int64_t TimePoint_to_ns(TimePoint val) { return val.time_since_epoch().count(); }
136
+
137
+ ARROW_PYTHON_EXPORT
138
+ inline TimePoint TimePoint_from_s(double val) {
139
+ return TimePoint(TimePoint::duration(static_cast<int64_t>(1e9 * val)));
140
+ }
141
+
142
+ ARROW_PYTHON_EXPORT
143
+ inline TimePoint TimePoint_from_ns(int64_t val) {
144
+ return TimePoint(TimePoint::duration(val));
145
+ }
146
+
147
+ ARROW_PYTHON_EXPORT
148
+ inline int64_t PyDelta_to_s(PyDateTime_Delta* pytimedelta) {
149
+ return (PyDateTime_DELTA_GET_DAYS(pytimedelta) * 86400LL +
150
+ PyDateTime_DELTA_GET_SECONDS(pytimedelta));
151
+ }
152
+
153
+ ARROW_PYTHON_EXPORT
154
+ inline int64_t PyDelta_to_ms(PyDateTime_Delta* pytimedelta) {
155
+ return (PyDelta_to_s(pytimedelta) * 1000LL +
156
+ PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta) / 1000);
157
+ }
158
+
159
+ ARROW_PYTHON_EXPORT
160
+ inline Result<int64_t> PyDelta_to_us(PyDateTime_Delta* pytimedelta) {
161
+ int64_t result = PyDelta_to_s(pytimedelta);
162
+ if (MultiplyWithOverflow(result, 1000000LL, &result)) {
163
+ return Status::Invalid("Timedelta too large to fit in 64-bit integer");
164
+ }
165
+ if (AddWithOverflow(result, PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta), &result)) {
166
+ return Status::Invalid("Timedelta too large to fit in 64-bit integer");
167
+ }
168
+ return result;
169
+ }
170
+
171
+ ARROW_PYTHON_EXPORT
172
+ inline Result<int64_t> PyDelta_to_ns(PyDateTime_Delta* pytimedelta) {
173
+ ARROW_ASSIGN_OR_RAISE(int64_t result, PyDelta_to_us(pytimedelta));
174
+ if (MultiplyWithOverflow(result, 1000LL, &result)) {
175
+ return Status::Invalid("Timedelta too large to fit in 64-bit integer");
176
+ }
177
+ return result;
178
+ }
179
+
180
+ ARROW_PYTHON_EXPORT
181
+ Result<int64_t> PyDateTime_utcoffset_s(PyObject* pydatetime);
182
+
183
+ /// \brief Convert a time zone name into a time zone object.
184
+ ///
185
+ /// Supported input strings are:
186
+ /// * As used in the Olson time zone database (the "tz database" or
187
+ /// "tzdata"), such as "America/New_York"
188
+ /// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
189
+ /// GIL must be held when calling this method.
190
+ ARROW_PYTHON_EXPORT
191
+ Result<PyObject*> StringToTzinfo(const std::string& tz);
192
+
193
+ /// \brief Convert a time zone object to a string representation.
194
+ ///
195
+ /// The output strings are:
196
+ /// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
197
+ /// if the input object is either an instance of pytz._FixedOffset or
198
+ /// datetime.timedelta
199
+ /// * The timezone's name if the input object's tzname() method returns with a
200
+ /// non-empty timezone name such as "UTC" or "America/New_York"
201
+ ///
202
+ /// GIL must be held when calling this method.
203
+ ARROW_PYTHON_EXPORT
204
+ Result<std::string> TzinfoToString(PyObject* pytzinfo);
205
+
206
+ /// \brief Convert MonthDayNano to a python namedtuple.
207
+ ///
208
+ /// Return a named tuple (pyarrow.MonthDayNano) containing attributes
209
+ /// "months", "days", "nanoseconds" in the given order
210
+ /// with values extracted from the fields on interval.
211
+ ///
212
+ /// GIL must be held when calling this method.
213
+ ARROW_PYTHON_EXPORT
214
+ PyObject* MonthDayNanoIntervalToNamedTuple(
215
+ const MonthDayNanoIntervalType::MonthDayNanos& interval);
216
+
217
+ /// \brief Convert the given Array to a PyList object containing
218
+ /// pyarrow.MonthDayNano objects.
219
+ ARROW_PYTHON_EXPORT
220
+ Result<PyObject*> MonthDayNanoIntervalArrayToPyList(
221
+ const MonthDayNanoIntervalArray& array);
222
+
223
+ /// \brief Convert the Scalar object to a pyarrow.MonthDayNano (or None if
224
+ /// it isn't valid).
225
+ ARROW_PYTHON_EXPORT
226
+ Result<PyObject*> MonthDayNanoIntervalScalarToPyObject(
227
+ const MonthDayNanoIntervalScalar& scalar);
228
+
229
+ } // namespace internal
230
+ } // namespace py
231
+ } // namespace arrow
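A small round-trip sketch for the two timezone helpers above (hypothetical caller inside a Status- or Result-returning function, GIL held, with pytz or zoneinfo available as described in datetime.cc):

// "+07:30" exercises the fixed-offset path; an Olson name such as
// "America/New_York" would be resolved through pytz or zoneinfo instead.
ARROW_ASSIGN_OR_RAISE(PyObject* tzinfo,
                      arrow::py::internal::StringToTzinfo("+07:30"));
ARROW_ASSIGN_OR_RAISE(std::string name,
                      arrow::py::internal::TzinfoToString(tzinfo));  // "+07:30"
Py_DECREF(tzinfo);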
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.cc ADDED
@@ -0,0 +1,246 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <algorithm>
19
+ #include <limits>
20
+
21
+ #include "arrow/python/common.h"
22
+ #include "arrow/python/decimal.h"
23
+ #include "arrow/python/helpers.h"
24
+ #include "arrow/type_fwd.h"
25
+ #include "arrow/util/decimal.h"
26
+ #include "arrow/util/logging.h"
27
+
28
+ namespace arrow {
29
+ namespace py {
30
+ namespace internal {
31
+
32
+ Status ImportDecimalType(OwnedRef* decimal_type) {
33
+ OwnedRef decimal_module;
34
+ RETURN_NOT_OK(ImportModule("decimal", &decimal_module));
35
+ RETURN_NOT_OK(ImportFromModule(decimal_module.obj(), "Decimal", decimal_type));
36
+ return Status::OK();
37
+ }
38
+
39
+ Status PythonDecimalToString(PyObject* python_decimal, std::string* out) {
40
+ // Call Python's str(decimal_object)
41
+ return PyObject_StdStringStr(python_decimal, out);
42
+ }
43
+
44
+ // \brief Infer the precision and scale of a Python decimal.Decimal instance
45
+ // \param python_decimal[in] An instance of decimal.Decimal
46
+ // \param precision[out] The value of the inferred precision
47
+ // \param scale[out] The value of the inferred scale
48
+ // \return The status of the operation
49
+ static Status InferDecimalPrecisionAndScale(PyObject* python_decimal, int32_t* precision,
50
+ int32_t* scale) {
51
+ DCHECK_NE(python_decimal, NULLPTR);
52
+ DCHECK_NE(precision, NULLPTR);
53
+ DCHECK_NE(scale, NULLPTR);
54
+
55
+ // TODO(phillipc): Make sure we perform PyDecimal_Check(python_decimal) as a DCHECK
56
+ OwnedRef as_tuple(PyObject_CallMethod(python_decimal, const_cast<char*>("as_tuple"),
57
+ const_cast<char*>("")));
58
+ RETURN_IF_PYERROR();
59
+ DCHECK(PyTuple_Check(as_tuple.obj()));
60
+
61
+ OwnedRef digits(PyObject_GetAttrString(as_tuple.obj(), "digits"));
62
+ RETURN_IF_PYERROR();
63
+ DCHECK(PyTuple_Check(digits.obj()));
64
+
65
+ const auto num_digits = static_cast<int32_t>(PyTuple_Size(digits.obj()));
66
+ RETURN_IF_PYERROR();
67
+
68
+ OwnedRef py_exponent(PyObject_GetAttrString(as_tuple.obj(), "exponent"));
69
+ RETURN_IF_PYERROR();
70
+ DCHECK(IsPyInteger(py_exponent.obj()));
71
+
72
+ const auto exponent = static_cast<int32_t>(PyLong_AsLong(py_exponent.obj()));
73
+ RETURN_IF_PYERROR();
74
+
75
+ if (exponent < 0) {
76
+ // If exponent > num_digits, we have a number with leading zeros
77
+ // such as 0.01234. Ensure we have enough precision for leading zeros
78
+ // (which are not included in num_digits).
79
+ *precision = std::max(num_digits, -exponent);
80
+ *scale = -exponent;
81
+ } else {
82
+ // Trailing zeros are not included in num_digits, need to add to precision.
83
+ // Note we don't generate negative scales as they are poorly supported
84
+ // in non-Arrow systems.
85
+ *precision = num_digits + exponent;
86
+ *scale = 0;
87
+ }
88
+ return Status::OK();
89
+ }
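// Editorial aside (not part of the diff): two concrete inputs.
// decimal.Decimal("0.01234").as_tuple() gives digits=(1,2,3,4), exponent=-5, so
//   precision = max(4, 5) = 5 and scale = 5   -> decimal(5, 5)
// decimal.Decimal("1.23E+3").as_tuple() gives digits=(1,2,3), exponent=1, so
//   precision = 3 + 1 = 4 and scale = 0       -> decimal(4, 0)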
90
+
91
+ PyObject* DecimalFromString(PyObject* decimal_constructor,
92
+ const std::string& decimal_string) {
93
+ DCHECK_NE(decimal_constructor, nullptr);
94
+
95
+ auto string_size = decimal_string.size();
96
+ DCHECK_GT(string_size, 0);
97
+
98
+ auto string_bytes = decimal_string.c_str();
99
+ DCHECK_NE(string_bytes, nullptr);
100
+
101
+ return PyObject_CallFunction(decimal_constructor, const_cast<char*>("s#"), string_bytes,
102
+ static_cast<Py_ssize_t>(string_size));
103
+ }
104
+
105
+ namespace {
106
+
107
+ template <typename ArrowDecimal>
108
+ Status DecimalFromStdString(const std::string& decimal_string,
109
+ const DecimalType& arrow_type, ArrowDecimal* out) {
110
+ int32_t inferred_precision;
111
+ int32_t inferred_scale;
112
+
113
+ RETURN_NOT_OK(ArrowDecimal::FromString(decimal_string, out, &inferred_precision,
114
+ &inferred_scale));
115
+
116
+ const int32_t precision = arrow_type.precision();
117
+ const int32_t scale = arrow_type.scale();
118
+
119
+ if (scale != inferred_scale) {
120
+ DCHECK_NE(out, NULLPTR);
121
+ ARROW_ASSIGN_OR_RAISE(*out, out->Rescale(inferred_scale, scale));
122
+ }
123
+
124
+ auto inferred_scale_delta = inferred_scale - scale;
125
+ if (ARROW_PREDICT_FALSE((inferred_precision - inferred_scale_delta) > precision)) {
126
+ return Status::Invalid(
127
+ "Decimal type with precision ", inferred_precision,
128
+ " does not fit into precision inferred from first array element: ", precision);
129
+ }
130
+
131
+ return Status::OK();
132
+ }
133
+
134
+ template <typename ArrowDecimal>
135
+ Status InternalDecimalFromPythonDecimal(PyObject* python_decimal,
136
+ const DecimalType& arrow_type,
137
+ ArrowDecimal* out) {
138
+ DCHECK_NE(python_decimal, NULLPTR);
139
+ DCHECK_NE(out, NULLPTR);
140
+
141
+ std::string string;
142
+ RETURN_NOT_OK(PythonDecimalToString(python_decimal, &string));
143
+ return DecimalFromStdString(string, arrow_type, out);
144
+ }
145
+
146
+ template <typename ArrowDecimal>
147
+ Status InternalDecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
148
+ ArrowDecimal* out) {
149
+ DCHECK_NE(obj, NULLPTR);
150
+ DCHECK_NE(out, NULLPTR);
151
+
152
+ if (IsPyInteger(obj)) {
153
+ // TODO: add a fast path for small-ish ints
154
+ std::string string;
155
+ RETURN_NOT_OK(PyObject_StdStringStr(obj, &string));
156
+ return DecimalFromStdString(string, arrow_type, out);
157
+ } else if (PyDecimal_Check(obj)) {
158
+ return InternalDecimalFromPythonDecimal<ArrowDecimal>(obj, arrow_type, out);
159
+ } else {
160
+ return Status::TypeError("int or Decimal object expected, got ",
161
+ Py_TYPE(obj)->tp_name);
162
+ }
163
+ }
164
+
165
+ } // namespace
166
+
167
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
168
+ Decimal128* out) {
169
+ return InternalDecimalFromPythonDecimal(python_decimal, arrow_type, out);
170
+ }
171
+
172
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
173
+ Decimal128* out) {
174
+ return InternalDecimalFromPyObject(obj, arrow_type, out);
175
+ }
176
+
177
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
178
+ Decimal256* out) {
179
+ return InternalDecimalFromPythonDecimal(python_decimal, arrow_type, out);
180
+ }
181
+
182
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
183
+ Decimal256* out) {
184
+ return InternalDecimalFromPyObject(obj, arrow_type, out);
185
+ }
186
+
187
+ bool PyDecimal_Check(PyObject* obj) {
188
+ static OwnedRef decimal_type;
189
+ if (!decimal_type.obj()) {
190
+ ARROW_CHECK_OK(ImportDecimalType(&decimal_type));
191
+ DCHECK(PyType_Check(decimal_type.obj()));
192
+ }
193
+ // PyObject_IsInstance() is slower as it has to check for virtual subclasses
194
+ const int result =
195
+ PyType_IsSubtype(Py_TYPE(obj), reinterpret_cast<PyTypeObject*>(decimal_type.obj()));
196
+ ARROW_CHECK_NE(result, -1) << " error during PyType_IsSubtype check";
197
+ return result == 1;
198
+ }
199
+
200
+ bool PyDecimal_ISNAN(PyObject* obj) {
201
+ DCHECK(PyDecimal_Check(obj)) << "obj is not an instance of decimal.Decimal";
202
+ OwnedRef is_nan(
203
+ PyObject_CallMethod(obj, const_cast<char*>("is_nan"), const_cast<char*>("")));
204
+ return PyObject_IsTrue(is_nan.obj()) == 1;
205
+ }
206
+
207
+ DecimalMetadata::DecimalMetadata()
208
+ : DecimalMetadata(std::numeric_limits<int32_t>::min(),
209
+ std::numeric_limits<int32_t>::min()) {}
210
+
211
+ DecimalMetadata::DecimalMetadata(int32_t precision, int32_t scale)
212
+ : precision_(precision), scale_(scale) {}
213
+
214
+ Status DecimalMetadata::Update(int32_t suggested_precision, int32_t suggested_scale) {
215
+ const int32_t current_scale = scale_;
216
+ scale_ = std::max(current_scale, suggested_scale);
217
+
218
+ const int32_t current_precision = precision_;
219
+
220
+ if (current_precision == std::numeric_limits<int32_t>::min()) {
221
+ precision_ = suggested_precision;
222
+ } else {
223
+ auto num_digits = std::max(current_precision - current_scale,
224
+ suggested_precision - suggested_scale);
225
+ precision_ = std::max(num_digits + scale_, current_precision);
226
+ }
227
+
228
+ return Status::OK();
229
+ }
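// Editorial aside (not part of the diff): merging two observed shapes.
// Starting from (precision_=5, scale_=2) and updating with (4, 3):
//   scale_     = max(2, 3)       = 3
//   num_digits = max(5-2, 4-3)   = 3   (integer digits needed so far)
//   precision_ = max(3 + 3, 5)   = 6
// so the accumulated type widens to decimal(6, 3), enough for both shapes.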
230
+
231
+ Status DecimalMetadata::Update(PyObject* object) {
232
+ bool is_decimal = PyDecimal_Check(object);
233
+
234
+ if (ARROW_PREDICT_FALSE(!is_decimal || PyDecimal_ISNAN(object))) {
235
+ return Status::OK();
236
+ }
237
+
238
+ int32_t precision = 0;
239
+ int32_t scale = 0;
240
+ RETURN_NOT_OK(InferDecimalPrecisionAndScale(object, &precision, &scale));
241
+ return Update(precision, scale);
242
+ }
243
+
244
+ } // namespace internal
245
+ } // namespace py
246
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.h ADDED
@@ -0,0 +1,128 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/python/visibility.h"
23
+ #include "arrow/type.h"
24
+
25
+ namespace arrow {
26
+
27
+ class Decimal128;
28
+ class Decimal256;
29
+
30
+ namespace py {
31
+
32
+ class OwnedRef;
33
+
34
+ //
35
+ // Python Decimal support
36
+ //
37
+
38
+ namespace internal {
39
+
40
+ // \brief Import the Python Decimal type
41
+ ARROW_PYTHON_EXPORT
42
+ Status ImportDecimalType(OwnedRef* decimal_type);
43
+
44
+ // \brief Convert a Python Decimal object to a C++ string
45
+ // \param[in] python_decimal A Python decimal.Decimal instance
46
+ // \param[out] out The string representation of the Python Decimal instance
47
+ // \return The status of the operation
48
+ ARROW_PYTHON_EXPORT
49
+ Status PythonDecimalToString(PyObject* python_decimal, std::string* out);
50
+
51
+ // \brief Convert a C++ std::string to a Python Decimal instance
52
+ // \param[in] decimal_constructor The decimal type object
53
+ // \param[in] decimal_string A decimal string
54
+ // \return An instance of decimal.Decimal
55
+ ARROW_PYTHON_EXPORT
56
+ PyObject* DecimalFromString(PyObject* decimal_constructor,
57
+ const std::string& decimal_string);
58
+
59
+ // \brief Convert a Python decimal to an Arrow Decimal128 object
60
+ // \param[in] python_decimal A Python decimal.Decimal instance
61
+ // \param[in] arrow_type An instance of arrow::DecimalType
62
+ // \param[out] out A pointer to a Decimal128
63
+ // \return The status of the operation
64
+ ARROW_PYTHON_EXPORT
65
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
66
+ Decimal128* out);
67
+
68
+ // \brief Convert a Python object to an Arrow Decimal128 object
69
+ // \param[in] obj A Python int or decimal.Decimal instance
70
+ // \param[in] arrow_type An instance of arrow::DecimalType
71
+ // \param[out] out A pointer to a Decimal128
72
+ // \return The status of the operation
73
+ ARROW_PYTHON_EXPORT
74
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal128* out);
75
+
76
+ // \brief Convert a Python decimal to an Arrow Decimal256 object
77
+ // \param[in] python_decimal A Python decimal.Decimal instance
78
+ // \param[in] arrow_type An instance of arrow::DecimalType
79
+ // \param[out] out A pointer to a Decimal256
80
+ // \return The status of the operation
81
+ ARROW_PYTHON_EXPORT
82
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
83
+ Decimal256* out);
84
+
85
+ // \brief Convert a Python object to an Arrow Decimal256 object
86
+ // \param[in] obj A Python int or decimal.Decimal instance
87
+ // \param[in] arrow_type An instance of arrow::DecimalType
88
+ // \param[out] out A pointer to a Decimal256
89
+ // \return The status of the operation
90
+ ARROW_PYTHON_EXPORT
91
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal256* out);
92
+
93
+ // \brief Check whether obj is an instance of Decimal
94
+ ARROW_PYTHON_EXPORT
95
+ bool PyDecimal_Check(PyObject* obj);
96
+
97
+ // \brief Check whether obj is nan. This function will abort the program if the argument
98
+ // is not a Decimal instance
99
+ ARROW_PYTHON_EXPORT
100
+ bool PyDecimal_ISNAN(PyObject* obj);
101
+
102
+ // \brief Helper class to track and update the precision and scale of a decimal
103
+ class ARROW_PYTHON_EXPORT DecimalMetadata {
104
+ public:
105
+ DecimalMetadata();
106
+ DecimalMetadata(int32_t precision, int32_t scale);
107
+
108
+ // \brief Adjust the precision and scale of a decimal type given a new precision and a
109
+ // new scale \param[in] suggested_precision A candidate precision \param[in]
110
+ // suggested_scale A candidate scale \return The status of the operation
111
+ Status Update(int32_t suggested_precision, int32_t suggested_scale);
112
+
113
+ // \brief A convenient interface for updating the precision and scale based on a Python
114
+ // Decimal object \param object A Python Decimal object \return The status of the
115
+ // operation
116
+ Status Update(PyObject* object);
117
+
118
+ int32_t precision() const { return precision_; }
119
+ int32_t scale() const { return scale_; }
120
+
121
+ private:
122
+ int32_t precision_;
123
+ int32_t scale_;
124
+ };
125
+
126
+ } // namespace internal
127
+ } // namespace py
128
+ } // namespace arrow
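A minimal usage sketch for the conversion declared above (hypothetical caller holding the GIL and returning Status; py_obj stands in for any Python int or decimal.Decimal):

arrow::Decimal128Type target_type(/*precision=*/10, /*scale=*/2);
arrow::Decimal128 value;
ARROW_RETURN_NOT_OK(
    arrow::py::internal::DecimalFromPyObject(py_obj, target_type, &value));
// Decimal("12.5") ends up as the unscaled integer 1250 at scale 2.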
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.cc ADDED
@@ -0,0 +1,495 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/deserialize.h"
19
+
20
+ #include "arrow/python/numpy_interop.h"
21
+
22
+ #include <cstdint>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include <numpy/arrayobject.h>
29
+ #include <numpy/arrayscalars.h>
30
+
31
+ #include "arrow/array.h"
32
+ #include "arrow/io/interfaces.h"
33
+ #include "arrow/io/memory.h"
34
+ #include "arrow/ipc/options.h"
35
+ #include "arrow/ipc/reader.h"
36
+ #include "arrow/ipc/util.h"
37
+ #include "arrow/ipc/writer.h"
38
+ #include "arrow/table.h"
39
+ #include "arrow/util/checked_cast.h"
40
+ #include "arrow/util/logging.h"
41
+ #include "arrow/util/value_parsing.h"
42
+
43
+ #include "arrow/python/common.h"
44
+ #include "arrow/python/datetime.h"
45
+ #include "arrow/python/helpers.h"
46
+ #include "arrow/python/numpy_convert.h"
47
+ #include "arrow/python/pyarrow.h"
48
+ #include "arrow/python/serialize.h"
49
+
50
+ namespace arrow {
51
+
52
+ using internal::checked_cast;
53
+ using internal::ParseValue;
54
+
55
+ namespace py {
56
+
57
+ Status CallDeserializeCallback(PyObject* context, PyObject* value,
58
+ PyObject** deserialized_object);
59
+
60
+ Status DeserializeTuple(PyObject* context, const Array& array, int64_t start_idx,
61
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
62
+ PyObject** out);
63
+
64
+ Status DeserializeList(PyObject* context, const Array& array, int64_t start_idx,
65
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
66
+ PyObject** out);
67
+
68
+ Status DeserializeSet(PyObject* context, const Array& array, int64_t start_idx,
69
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
70
+ PyObject** out);
71
+
72
+ Status DeserializeDict(PyObject* context, const Array& array, int64_t start_idx,
73
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
74
+ PyObject** out) {
75
+ const auto& data = checked_cast<const StructArray&>(array);
76
+ OwnedRef keys, vals;
77
+ OwnedRef result(PyDict_New());
78
+ RETURN_IF_PYERROR();
79
+
80
+ DCHECK_EQ(2, data.num_fields());
81
+
82
+ RETURN_NOT_OK(DeserializeList(context, *data.field(0), start_idx, stop_idx, base, blobs,
83
+ keys.ref()));
84
+ RETURN_NOT_OK(DeserializeList(context, *data.field(1), start_idx, stop_idx, base, blobs,
85
+ vals.ref()));
86
+ for (int64_t i = start_idx; i < stop_idx; ++i) {
87
+ // PyDict_SetItem behaves differently from PyList_SetItem and PyTuple_SetItem.
88
+ // The latter two steal references whereas PyDict_SetItem does not. So we need
89
+ // to make sure the reference count is decremented by letting the OwnedRef
90
+ // go out of scope at the end.
91
+ int ret = PyDict_SetItem(result.obj(), PyList_GET_ITEM(keys.obj(), i - start_idx),
92
+ PyList_GET_ITEM(vals.obj(), i - start_idx));
93
+ if (ret != 0) {
94
+ return ConvertPyError();
95
+ }
96
+ }
97
+ static PyObject* py_type = PyUnicode_FromString("_pytype_");
98
+ if (PyDict_Contains(result.obj(), py_type)) {
99
+ RETURN_NOT_OK(CallDeserializeCallback(context, result.obj(), out));
100
+ } else {
101
+ *out = result.detach();
102
+ }
103
+ return Status::OK();
104
+ }
105
+
106
+ Status DeserializeArray(int32_t index, PyObject* base, const SerializedPyObject& blobs,
107
+ PyObject** out) {
108
+ RETURN_NOT_OK(py::TensorToNdarray(blobs.ndarrays[index], base, out));
109
+ // Mark the array as immutable
110
+ OwnedRef flags(PyObject_GetAttrString(*out, "flags"));
111
+ if (flags.obj() == NULL) {
112
+ return ConvertPyError();
113
+ }
114
+ if (PyObject_SetAttrString(flags.obj(), "writeable", Py_False) < 0) {
115
+ return ConvertPyError();
116
+ }
117
+ return Status::OK();
118
+ }
119
+
120
+ Status GetValue(PyObject* context, const Array& arr, int64_t index, int8_t type,
121
+ PyObject* base, const SerializedPyObject& blobs, PyObject** result) {
122
+ switch (type) {
123
+ case PythonType::NONE:
124
+ Py_INCREF(Py_None);
125
+ *result = Py_None;
126
+ return Status::OK();
127
+ case PythonType::BOOL:
128
+ *result = PyBool_FromLong(checked_cast<const BooleanArray&>(arr).Value(index));
129
+ return Status::OK();
130
+ case PythonType::PY2INT:
131
+ case PythonType::INT: {
132
+ *result = PyLong_FromSsize_t(checked_cast<const Int64Array&>(arr).Value(index));
133
+ return Status::OK();
134
+ }
135
+ case PythonType::BYTES: {
136
+ auto view = checked_cast<const BinaryArray&>(arr).GetView(index);
137
+ *result = PyBytes_FromStringAndSize(view.data(), view.length());
138
+ return CheckPyError();
139
+ }
140
+ case PythonType::STRING: {
141
+ auto view = checked_cast<const StringArray&>(arr).GetView(index);
142
+ *result = PyUnicode_FromStringAndSize(view.data(), view.length());
143
+ return CheckPyError();
144
+ }
145
+ case PythonType::HALF_FLOAT: {
146
+ *result = PyHalf_FromHalf(checked_cast<const HalfFloatArray&>(arr).Value(index));
147
+ RETURN_IF_PYERROR();
148
+ return Status::OK();
149
+ }
150
+ case PythonType::FLOAT:
151
+ *result = PyFloat_FromDouble(checked_cast<const FloatArray&>(arr).Value(index));
152
+ return Status::OK();
153
+ case PythonType::DOUBLE:
154
+ *result = PyFloat_FromDouble(checked_cast<const DoubleArray&>(arr).Value(index));
155
+ return Status::OK();
156
+ case PythonType::DATE64: {
157
+ RETURN_NOT_OK(internal::PyDateTime_from_int(
158
+ checked_cast<const Date64Array&>(arr).Value(index), TimeUnit::MICRO, result));
159
+ RETURN_IF_PYERROR();
160
+ return Status::OK();
161
+ }
162
+ case PythonType::LIST: {
163
+ const auto& l = checked_cast<const ListArray&>(arr);
164
+ return DeserializeList(context, *l.values(), l.value_offset(index),
165
+ l.value_offset(index + 1), base, blobs, result);
166
+ }
167
+ case PythonType::DICT: {
168
+ const auto& l = checked_cast<const ListArray&>(arr);
169
+ return DeserializeDict(context, *l.values(), l.value_offset(index),
170
+ l.value_offset(index + 1), base, blobs, result);
171
+ }
172
+ case PythonType::TUPLE: {
173
+ const auto& l = checked_cast<const ListArray&>(arr);
174
+ return DeserializeTuple(context, *l.values(), l.value_offset(index),
175
+ l.value_offset(index + 1), base, blobs, result);
176
+ }
177
+ case PythonType::SET: {
178
+ const auto& l = checked_cast<const ListArray&>(arr);
179
+ return DeserializeSet(context, *l.values(), l.value_offset(index),
180
+ l.value_offset(index + 1), base, blobs, result);
181
+ }
182
+ case PythonType::TENSOR: {
183
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
184
+ *result = wrap_tensor(blobs.tensors[ref]);
185
+ return Status::OK();
186
+ }
187
+ case PythonType::SPARSECOOTENSOR: {
188
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
189
+ const std::shared_ptr<SparseCOOTensor>& sparse_coo_tensor =
190
+ arrow::internal::checked_pointer_cast<SparseCOOTensor>(
191
+ blobs.sparse_tensors[ref]);
192
+ *result = wrap_sparse_coo_tensor(sparse_coo_tensor);
193
+ return Status::OK();
194
+ }
195
+ case PythonType::SPARSECSRMATRIX: {
196
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
197
+ const std::shared_ptr<SparseCSRMatrix>& sparse_csr_matrix =
198
+ arrow::internal::checked_pointer_cast<SparseCSRMatrix>(
199
+ blobs.sparse_tensors[ref]);
200
+ *result = wrap_sparse_csr_matrix(sparse_csr_matrix);
201
+ return Status::OK();
202
+ }
203
+ case PythonType::SPARSECSCMATRIX: {
204
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
205
+ const std::shared_ptr<SparseCSCMatrix>& sparse_csc_matrix =
206
+ arrow::internal::checked_pointer_cast<SparseCSCMatrix>(
207
+ blobs.sparse_tensors[ref]);
208
+ *result = wrap_sparse_csc_matrix(sparse_csc_matrix);
209
+ return Status::OK();
210
+ }
211
+ case PythonType::SPARSECSFTENSOR: {
212
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
213
+ const std::shared_ptr<SparseCSFTensor>& sparse_csf_tensor =
214
+ arrow::internal::checked_pointer_cast<SparseCSFTensor>(
215
+ blobs.sparse_tensors[ref]);
216
+ *result = wrap_sparse_csf_tensor(sparse_csf_tensor);
217
+ return Status::OK();
218
+ }
219
+ case PythonType::NDARRAY: {
220
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
221
+ return DeserializeArray(ref, base, blobs, result);
222
+ }
223
+ case PythonType::BUFFER: {
224
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
225
+ *result = wrap_buffer(blobs.buffers[ref]);
226
+ return Status::OK();
227
+ }
228
+ default: {
229
+ ARROW_CHECK(false) << "union tag '" << type << "' not recognized";
230
+ }
231
+ }
232
+ return Status::OK();
233
+ }
234
+
235
+ Status GetPythonTypes(const UnionArray& data, std::vector<int8_t>* result) {
236
+ ARROW_CHECK(result != nullptr);
237
+ auto type = data.type();
238
+ for (int i = 0; i < type->num_fields(); ++i) {
239
+ int8_t tag = 0;
240
+ const std::string& data = type->field(i)->name();
241
+ if (!ParseValue<Int8Type>(data.c_str(), data.size(), &tag)) {
242
+ return Status::SerializationError("Cannot convert string: \"",
243
+ type->field(i)->name(), "\" to int8_t");
244
+ }
245
+ result->push_back(tag);
246
+ }
247
+ return Status::OK();
248
+ }
249
+
250
+ template <typename CreateSequenceFn, typename SetItemFn>
251
+ Status DeserializeSequence(PyObject* context, const Array& array, int64_t start_idx,
252
+ int64_t stop_idx, PyObject* base,
253
+ const SerializedPyObject& blobs,
254
+ CreateSequenceFn&& create_sequence, SetItemFn&& set_item,
255
+ PyObject** out) {
256
+ const auto& data = checked_cast<const DenseUnionArray&>(array);
257
+ OwnedRef result(create_sequence(stop_idx - start_idx));
258
+ RETURN_IF_PYERROR();
259
+ const int8_t* type_codes = data.raw_type_codes();
260
+ const int32_t* value_offsets = data.raw_value_offsets();
261
+ std::vector<int8_t> python_types;
262
+ RETURN_NOT_OK(GetPythonTypes(data, &python_types));
263
+ for (int64_t i = start_idx; i < stop_idx; ++i) {
264
+ const int64_t offset = value_offsets[i];
265
+ const uint8_t type = type_codes[i];
266
+ PyObject* value;
267
+ RETURN_NOT_OK(GetValue(context, *data.field(type), offset, python_types[type], base,
268
+ blobs, &value));
269
+ RETURN_NOT_OK(set_item(result.obj(), i - start_idx, value));
270
+ }
271
+ *out = result.detach();
272
+ return Status::OK();
273
+ }
274
+
275
+ Status DeserializeList(PyObject* context, const Array& array, int64_t start_idx,
276
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
277
+ PyObject** out) {
278
+ return DeserializeSequence(
279
+ context, array, start_idx, stop_idx, base, blobs,
280
+ [](int64_t size) { return PyList_New(size); },
281
+ [](PyObject* seq, int64_t index, PyObject* item) {
282
+ PyList_SET_ITEM(seq, index, item);
283
+ return Status::OK();
284
+ },
285
+ out);
286
+ }
287
+
288
+ Status DeserializeTuple(PyObject* context, const Array& array, int64_t start_idx,
289
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
290
+ PyObject** out) {
291
+ return DeserializeSequence(
292
+ context, array, start_idx, stop_idx, base, blobs,
293
+ [](int64_t size) { return PyTuple_New(size); },
294
+ [](PyObject* seq, int64_t index, PyObject* item) {
295
+ PyTuple_SET_ITEM(seq, index, item);
296
+ return Status::OK();
297
+ },
298
+ out);
299
+ }
300
+
301
+ Status DeserializeSet(PyObject* context, const Array& array, int64_t start_idx,
302
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
303
+ PyObject** out) {
304
+ return DeserializeSequence(
305
+ context, array, start_idx, stop_idx, base, blobs,
306
+ [](int64_t size) { return PySet_New(nullptr); },
307
+ [](PyObject* seq, int64_t index, PyObject* item) {
308
+ int err = PySet_Add(seq, item);
309
+ Py_DECREF(item);
310
+ if (err < 0) {
311
+ RETURN_IF_PYERROR();
312
+ }
313
+ return Status::OK();
314
+ },
315
+ out);
316
+ }
317
+
318
+ Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out) {
319
+ int32_t num_tensors;
320
+ int32_t num_sparse_tensors;
321
+ int32_t num_ndarrays;
322
+ int32_t num_buffers;
323
+
324
+ // Read number of tensors
325
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_tensors)));
326
+ RETURN_NOT_OK(
327
+ src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_sparse_tensors)));
328
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_ndarrays)));
329
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_buffers)));
330
+
331
+ // Align stream to 8-byte offset
332
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kArrowIpcAlignment));
333
+ std::shared_ptr<RecordBatchReader> reader;
334
+ ARROW_ASSIGN_OR_RAISE(reader, ipc::RecordBatchStreamReader::Open(src));
335
+ RETURN_NOT_OK(reader->ReadNext(&out->batch));
336
+
337
+ /// Skip EOS marker
338
+ RETURN_NOT_OK(src->Advance(4));
339
+
340
+ /// Align stream so tensor bodies are 64-byte aligned
341
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
342
+
343
+ for (int i = 0; i < num_tensors; ++i) {
344
+ std::shared_ptr<Tensor> tensor;
345
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(src));
346
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
347
+ out->tensors.push_back(tensor);
348
+ }
349
+
350
+ for (int i = 0; i < num_sparse_tensors; ++i) {
351
+ std::shared_ptr<SparseTensor> sparse_tensor;
352
+ ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::ReadSparseTensor(src));
353
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
354
+ out->sparse_tensors.push_back(sparse_tensor);
355
+ }
356
+
357
+ for (int i = 0; i < num_ndarrays; ++i) {
358
+ std::shared_ptr<Tensor> ndarray;
359
+ ARROW_ASSIGN_OR_RAISE(ndarray, ipc::ReadTensor(src));
360
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
361
+ out->ndarrays.push_back(ndarray);
362
+ }
363
+
364
+ ARROW_ASSIGN_OR_RAISE(int64_t offset, src->Tell());
365
+ for (int i = 0; i < num_buffers; ++i) {
366
+ int64_t size;
367
+ RETURN_NOT_OK(src->ReadAt(offset, sizeof(int64_t), &size));
368
+ offset += sizeof(int64_t);
369
+ ARROW_ASSIGN_OR_RAISE(auto buffer, src->ReadAt(offset, size));
370
+ out->buffers.push_back(buffer);
371
+ offset += size;
372
+ }
373
+
374
+ return Status::OK();
375
+ }
376
+
377
+ Status DeserializeObject(PyObject* context, const SerializedPyObject& obj, PyObject* base,
378
+ PyObject** out) {
379
+ PyAcquireGIL lock;
380
+ return DeserializeList(context, *obj.batch->column(0), 0, obj.batch->num_rows(), base,
381
+ obj, out);
382
+ }
383
+
384
+ Status GetSerializedFromComponents(int num_tensors,
385
+ const SparseTensorCounts& num_sparse_tensors,
386
+ int num_ndarrays, int num_buffers, PyObject* data,
387
+ SerializedPyObject* out) {
388
+ PyAcquireGIL gil;
389
+ const Py_ssize_t data_length = PyList_Size(data);
390
+ RETURN_IF_PYERROR();
391
+
392
+ const Py_ssize_t expected_data_length = 1 + num_tensors * 2 +
393
+ num_sparse_tensors.num_total_buffers() +
394
+ num_ndarrays * 2 + num_buffers;
395
+ if (data_length != expected_data_length) {
396
+ return Status::Invalid("Invalid number of buffers in data");
397
+ }
398
+
399
+ auto GetBuffer = [&data](Py_ssize_t index, std::shared_ptr<Buffer>* out) {
400
+ ARROW_CHECK_LE(index, PyList_Size(data));
401
+ PyObject* py_buf = PyList_GET_ITEM(data, index);
402
+ return unwrap_buffer(py_buf).Value(out);
403
+ };
404
+
405
+ Py_ssize_t buffer_index = 0;
406
+
407
+ // Read the union batch describing object structure
408
+ {
409
+ std::shared_ptr<Buffer> data_buffer;
410
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &data_buffer));
411
+ gil.release();
412
+ io::BufferReader buf_reader(data_buffer);
413
+ std::shared_ptr<RecordBatchReader> reader;
414
+ ARROW_ASSIGN_OR_RAISE(reader, ipc::RecordBatchStreamReader::Open(&buf_reader));
415
+ RETURN_NOT_OK(reader->ReadNext(&out->batch));
416
+ gil.acquire();
417
+ }
418
+
419
+ // Zero-copy reconstruct tensors
420
+ for (int i = 0; i < num_tensors; ++i) {
421
+ std::shared_ptr<Buffer> metadata;
422
+ std::shared_ptr<Buffer> body;
423
+ std::shared_ptr<Tensor> tensor;
424
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &metadata));
425
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
426
+
427
+ ipc::Message message(metadata, body);
428
+
429
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
430
+ out->tensors.emplace_back(std::move(tensor));
431
+ }
432
+
433
+ // Zero-copy reconstruct sparse tensors
434
+ for (int i = 0, n = num_sparse_tensors.num_total_tensors(); i < n; ++i) {
435
+ ipc::IpcPayload payload;
436
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &payload.metadata));
437
+
438
+ ARROW_ASSIGN_OR_RAISE(
439
+ size_t num_bodies,
440
+ ipc::internal::ReadSparseTensorBodyBufferCount(*payload.metadata));
441
+
442
+ payload.body_buffers.reserve(num_bodies);
443
+ for (size_t i = 0; i < num_bodies; ++i) {
444
+ std::shared_ptr<Buffer> body;
445
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
446
+ payload.body_buffers.emplace_back(body);
447
+ }
448
+
449
+ std::shared_ptr<SparseTensor> sparse_tensor;
450
+ ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::internal::ReadSparseTensorPayload(payload));
451
+ out->sparse_tensors.emplace_back(std::move(sparse_tensor));
452
+ }
453
+
454
+ // Zero-copy reconstruct tensors for numpy ndarrays
455
+ for (int i = 0; i < num_ndarrays; ++i) {
456
+ std::shared_ptr<Buffer> metadata;
457
+ std::shared_ptr<Buffer> body;
458
+ std::shared_ptr<Tensor> tensor;
459
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &metadata));
460
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
461
+
462
+ ipc::Message message(metadata, body);
463
+
464
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
465
+ out->ndarrays.emplace_back(std::move(tensor));
466
+ }
467
+
468
+ // Unwrap and append buffers
469
+ for (int i = 0; i < num_buffers; ++i) {
470
+ std::shared_ptr<Buffer> buffer;
471
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &buffer));
472
+ out->buffers.emplace_back(std::move(buffer));
473
+ }
474
+
475
+ return Status::OK();
476
+ }
477
+
478
+ Status DeserializeNdarray(const SerializedPyObject& object,
479
+ std::shared_ptr<Tensor>* out) {
480
+ if (object.ndarrays.size() != 1) {
481
+ return Status::Invalid("Object is not an Ndarray");
482
+ }
483
+ *out = object.ndarrays[0];
484
+ return Status::OK();
485
+ }
486
+
487
+ Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out) {
488
+ io::BufferReader reader(src);
489
+ SerializedPyObject object;
490
+ RETURN_NOT_OK(ReadSerializedObject(&reader, &object));
491
+ return DeserializeNdarray(object, out);
492
+ }
493
+
494
+ } // namespace py
495
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.h ADDED
@@ -0,0 +1,106 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/python/serialize.h"
25
+ #include "arrow/python/visibility.h"
26
+ #include "arrow/status.h"
27
+
28
+ namespace arrow {
29
+
30
+ class RecordBatch;
31
+ class Tensor;
32
+
33
+ namespace io {
34
+
35
+ class RandomAccessFile;
36
+
37
+ } // namespace io
38
+
39
+ namespace py {
40
+
41
+ struct ARROW_PYTHON_EXPORT SparseTensorCounts {
42
+ int coo;
43
+ int csr;
44
+ int csc;
45
+ int csf;
46
+ int ndim_csf;
47
+
48
+ int num_total_tensors() const { return coo + csr + csc + csf; }
49
+ int num_total_buffers() const {
50
+ return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf;
51
+ }
52
+ };
53
+
54
+ /// \brief Read serialized Python sequence from file interface using Arrow IPC
55
+ /// \param[in] src a RandomAccessFile
56
+ /// \param[out] out the reconstructed data
57
+ /// \return Status
58
+ ARROW_PYTHON_EXPORT
59
+ Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out);
60
+
61
+ /// \brief Reconstruct SerializedPyObject from representation produced by
62
+ /// SerializedPyObject::GetComponents.
63
+ ///
64
+ /// \param[in] num_tensors number of tensors in the object
65
+ /// \param[in] num_sparse_tensors number of sparse tensors in the object
66
+ /// \param[in] num_ndarrays number of numpy Ndarrays in the object
67
+ /// \param[in] num_buffers number of buffers in the object
68
+ /// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 +
69
+ /// num_tensors * 2 + num_sparse_tensors.num_total_buffers() + num_ndarrays * 2 +
70
+ /// num_buffers in length
71
+ /// \param[out] out the reconstructed object
72
+ /// \return Status
73
+ ARROW_PYTHON_EXPORT
74
+ Status GetSerializedFromComponents(int num_tensors,
75
+ const SparseTensorCounts& num_sparse_tensors,
76
+ int num_ndarrays, int num_buffers, PyObject* data,
77
+ SerializedPyObject* out);
78
+
79
+ /// \brief Reconstruct Python object from Arrow-serialized representation
80
+ /// \param[in] context Serialization context which contains custom serialization
81
+ /// and deserialization callbacks. Can be any Python object with a
82
+ /// _serialize_callback method for serialization and a _deserialize_callback
83
+ /// method for deserialization. If context is None, no custom serialization
84
+ /// will be attempted.
85
+ /// \param[in] object Object to deserialize
86
+ /// \param[in] base a Python object holding the underlying data that any NumPy
87
+ /// arrays will reference, to avoid premature deallocation
88
+ /// \param[out] out The returned object
89
+ /// \return Status
90
+ /// This acquires the GIL
91
+ ARROW_PYTHON_EXPORT
92
+ Status DeserializeObject(PyObject* context, const SerializedPyObject& object,
93
+ PyObject* base, PyObject** out);
94
+
95
+ /// \brief Reconstruct Ndarray from Arrow-serialized representation
96
+ /// \param[in] object Object to deserialize
97
+ /// \param[out] out The deserialized tensor
98
+ /// \return Status
99
+ ARROW_PYTHON_EXPORT
100
+ Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr<Tensor>* out);
101
+
102
+ ARROW_PYTHON_EXPORT
103
+ Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out);
104
+
105
+ } // namespace py
106
+ } // namespace arrow
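As a rough usage sketch (not part of this commit), the declarations in deserialize.h can be combined as below. It mirrors NdarrayFromBuffer but goes through DeserializeObject; the helper name ObjectFromBuffer and the use of Py_None as the `base` argument are illustrative assumptions, not part of the pyarrow API.

    // Hypothetical helper, for illustration only: rebuild a Python object from
    // a Buffer holding the serialized representation.
    #include "arrow/python/platform.h"  // brings in Python.h
    #include "arrow/buffer.h"
    #include "arrow/io/memory.h"
    #include "arrow/python/deserialize.h"

    arrow::Status ObjectFromBuffer(std::shared_ptr<arrow::Buffer> src,
                                   PyObject* context, PyObject** out) {
      arrow::io::BufferReader reader(std::move(src));
      arrow::py::SerializedPyObject obj;
      // Reads the union record batch plus tensor/ndarray/buffer payloads.
      ARROW_RETURN_NOT_OK(arrow::py::ReadSerializedObject(&reader, &obj));
      // DeserializeObject acquires the GIL itself; Py_None as `base` is an
      // assumption here (callers normally pass the object that owns the data).
      return arrow::py::DeserializeObject(context, obj, Py_None, out);
    }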
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.cc ADDED
@@ -0,0 +1,217 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <memory>
19
+ #include <sstream>
20
+ #include <utility>
21
+
22
+ #include "arrow/python/extension_type.h"
23
+ #include "arrow/python/helpers.h"
24
+ #include "arrow/python/pyarrow.h"
25
+ #include "arrow/util/checked_cast.h"
26
+ #include "arrow/util/logging.h"
27
+
28
+ namespace arrow {
29
+
30
+ using internal::checked_cast;
31
+
32
+ namespace py {
33
+
34
+ namespace {
35
+
36
+ // Serialize a Python ExtensionType instance
37
+ Status SerializeExtInstance(PyObject* type_instance, std::string* out) {
38
+ OwnedRef res(
39
+ cpp_PyObject_CallMethod(type_instance, "__arrow_ext_serialize__", nullptr));
40
+ if (!res) {
41
+ return ConvertPyError();
42
+ }
43
+ if (!PyBytes_Check(res.obj())) {
44
+ return Status::TypeError(
45
+ "__arrow_ext_serialize__ should return bytes object, "
46
+ "got ",
47
+ internal::PyObject_StdStringRepr(res.obj()));
48
+ }
49
+ *out = internal::PyBytes_AsStdString(res.obj());
50
+ return Status::OK();
51
+ }
52
+
53
+ // Deserialize a Python ExtensionType instance
54
+ PyObject* DeserializeExtInstance(PyObject* type_class,
55
+ std::shared_ptr<DataType> storage_type,
56
+ const std::string& serialized_data) {
57
+ OwnedRef storage_ref(wrap_data_type(storage_type));
58
+ if (!storage_ref) {
59
+ return nullptr;
60
+ }
61
+ OwnedRef data_ref(PyBytes_FromStringAndSize(
62
+ serialized_data.data(), static_cast<Py_ssize_t>(serialized_data.size())));
63
+ if (!data_ref) {
64
+ return nullptr;
65
+ }
66
+
67
+ return cpp_PyObject_CallMethod(type_class, "__arrow_ext_deserialize__", "OO",
68
+ storage_ref.obj(), data_ref.obj());
69
+ }
70
+
71
+ } // namespace
72
+
73
+ static const char* kExtensionName = "arrow.py_extension_type";
74
+
75
+ std::string PyExtensionType::ToString(bool show_metadata) const {
76
+ PyAcquireGIL lock;
77
+
78
+ std::stringstream ss;
79
+ OwnedRef instance(GetInstance());
80
+ ss << "extension<" << this->extension_name() << "<" << Py_TYPE(instance.obj())->tp_name
81
+ << ">>";
82
+ return ss.str();
83
+ }
84
+
85
+ PyExtensionType::PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
86
+ PyObject* inst)
87
+ : ExtensionType(storage_type),
88
+ extension_name_(kExtensionName),
89
+ type_class_(typ),
90
+ type_instance_(inst) {}
91
+
92
+ PyExtensionType::PyExtensionType(std::shared_ptr<DataType> storage_type,
93
+ std::string extension_name, PyObject* typ,
94
+ PyObject* inst)
95
+ : ExtensionType(storage_type),
96
+ extension_name_(std::move(extension_name)),
97
+ type_class_(typ),
98
+ type_instance_(inst) {}
99
+
100
+ bool PyExtensionType::ExtensionEquals(const ExtensionType& other) const {
101
+ PyAcquireGIL lock;
102
+
103
+ if (other.extension_name() != extension_name()) {
104
+ return false;
105
+ }
106
+ const auto& other_ext = checked_cast<const PyExtensionType&>(other);
107
+ int res = -1;
108
+ if (!type_instance_) {
109
+ if (other_ext.type_instance_) {
110
+ return false;
111
+ }
112
+ // Compare Python types
113
+ res = PyObject_RichCompareBool(type_class_.obj(), other_ext.type_class_.obj(), Py_EQ);
114
+ } else {
115
+ if (!other_ext.type_instance_) {
116
+ return false;
117
+ }
118
+ // Compare Python instances
119
+ OwnedRef left(GetInstance());
120
+ OwnedRef right(other_ext.GetInstance());
121
+ if (!left || !right) {
122
+ goto error;
123
+ }
124
+ res = PyObject_RichCompareBool(left.obj(), right.obj(), Py_EQ);
125
+ }
126
+ if (res == -1) {
127
+ goto error;
128
+ }
129
+ return res == 1;
130
+
131
+ error:
132
+ // Cannot propagate error
133
+ PyErr_WriteUnraisable(nullptr);
134
+ return false;
135
+ }
136
+
137
+ std::shared_ptr<Array> PyExtensionType::MakeArray(std::shared_ptr<ArrayData> data) const {
138
+ DCHECK_EQ(data->type->id(), Type::EXTENSION);
139
+ return std::make_shared<ExtensionArray>(data);
140
+ }
141
+
142
+ std::string PyExtensionType::Serialize() const {
143
+ DCHECK(type_instance_);
144
+ return serialized_;
145
+ }
146
+
147
+ Result<std::shared_ptr<DataType>> PyExtensionType::Deserialize(
148
+ std::shared_ptr<DataType> storage_type, const std::string& serialized_data) const {
149
+ PyAcquireGIL lock;
150
+
151
+ if (import_pyarrow()) {
152
+ return ConvertPyError();
153
+ }
154
+ OwnedRef res(DeserializeExtInstance(type_class_.obj(), storage_type, serialized_data));
155
+ if (!res) {
156
+ return ConvertPyError();
157
+ }
158
+ return unwrap_data_type(res.obj());
159
+ }
160
+
161
+ PyObject* PyExtensionType::GetInstance() const {
162
+ if (!type_instance_) {
163
+ PyErr_SetString(PyExc_TypeError, "Not an instance");
164
+ return nullptr;
165
+ }
166
+ DCHECK(PyWeakref_CheckRef(type_instance_.obj()));
167
+ PyObject* inst = PyWeakref_GET_OBJECT(type_instance_.obj());
168
+ if (inst != Py_None) {
169
+ // Cached instance still alive
170
+ Py_INCREF(inst);
171
+ return inst;
172
+ } else {
173
+ // Must reconstruct from serialized form
174
+ // XXX cache again?
175
+ return DeserializeExtInstance(type_class_.obj(), storage_type_, serialized_);
176
+ }
177
+ }
178
+
179
+ Status PyExtensionType::SetInstance(PyObject* inst) const {
180
+ // Check we have the right type
181
+ PyObject* typ = reinterpret_cast<PyObject*>(Py_TYPE(inst));
182
+ if (typ != type_class_.obj()) {
183
+ return Status::TypeError("Unexpected Python ExtensionType class ",
184
+ internal::PyObject_StdStringRepr(typ), " expected ",
185
+ internal::PyObject_StdStringRepr(type_class_.obj()));
186
+ }
187
+
188
+ PyObject* wr = PyWeakref_NewRef(inst, nullptr);
189
+ if (wr == NULL) {
190
+ return ConvertPyError();
191
+ }
192
+ type_instance_.reset(wr);
193
+ return SerializeExtInstance(inst, &serialized_);
194
+ }
195
+
196
+ Status PyExtensionType::FromClass(const std::shared_ptr<DataType> storage_type,
197
+ const std::string extension_name, PyObject* typ,
198
+ std::shared_ptr<ExtensionType>* out) {
199
+ Py_INCREF(typ);
200
+ out->reset(new PyExtensionType(storage_type, std::move(extension_name), typ));
201
+ return Status::OK();
202
+ }
203
+
204
+ Status RegisterPyExtensionType(const std::shared_ptr<DataType>& type) {
205
+ DCHECK_EQ(type->id(), Type::EXTENSION);
206
+ auto ext_type = std::dynamic_pointer_cast<ExtensionType>(type);
207
+ return RegisterExtensionType(ext_type);
208
+ }
209
+
210
+ Status UnregisterPyExtensionType(const std::string& type_name) {
211
+ return UnregisterExtensionType(type_name);
212
+ }
213
+
214
+ std::string PyExtensionName() { return kExtensionName; }
215
+
216
+ } // namespace py
217
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.h ADDED
@@ -0,0 +1,85 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+
23
+ #include "arrow/extension_type.h"
24
+ #include "arrow/python/common.h"
25
+ #include "arrow/python/visibility.h"
26
+ #include "arrow/util/macros.h"
27
+
28
+ namespace arrow {
29
+ namespace py {
30
+
31
+ class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType {
32
+ public:
33
+ // Implement the ExtensionType API
34
+ std::string extension_name() const override { return extension_name_; }
35
+
36
+ std::string ToString(bool show_metadata = false) const override;
37
+
38
+ bool ExtensionEquals(const ExtensionType& other) const override;
39
+
40
+ std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;
41
+
42
+ Result<std::shared_ptr<DataType>> Deserialize(
43
+ std::shared_ptr<DataType> storage_type,
44
+ const std::string& serialized) const override;
45
+
46
+ std::string Serialize() const override;
47
+
48
+ // For use from Cython
49
+ // Assumes that `typ` is borrowed
50
+ static Status FromClass(const std::shared_ptr<DataType> storage_type,
51
+ const std::string extension_name, PyObject* typ,
52
+ std::shared_ptr<ExtensionType>* out);
53
+
54
+ // Return new ref
55
+ PyObject* GetInstance() const;
56
+ Status SetInstance(PyObject*) const;
57
+
58
+ protected:
59
+ PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
60
+ PyObject* inst = NULLPTR);
61
+ PyExtensionType(std::shared_ptr<DataType> storage_type, std::string extension_name,
62
+ PyObject* typ, PyObject* inst = NULLPTR);
63
+
64
+ std::string extension_name_;
65
+
66
+ // These fields are mutable because of two-step initialization.
67
+ mutable OwnedRefNoGIL type_class_;
68
+ // A weakref or null. Storing a strong reference to the Python extension type
69
+ // instance would create an unreclaimable reference cycle between Python and C++
70
+ // (the Python instance has to keep a strong reference to the C++ ExtensionType
71
+ // in the other direction). Instead, we store a weakref to the instance.
72
+ // If the weakref is dead, we reconstruct the instance from its serialized form.
73
+ mutable OwnedRefNoGIL type_instance_;
74
+ // Empty if type_instance_ is null
75
+ mutable std::string serialized_;
76
+ };
77
+
78
+ ARROW_PYTHON_EXPORT std::string PyExtensionName();
79
+
80
+ ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr<DataType>&);
81
+
82
+ ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name);
83
+
84
+ } // namespace py
85
+ } // namespace arrow
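A rough sketch (not part of this commit) of the two-step initialization described above, approximately as the Cython layer might drive it: FromClass builds the C++ type from the Python class alone, and the instance weakref is attached later through SetInstance. The wrapper name RegisterFromPython is hypothetical.

    // Illustration only: register a Python-defined extension type class.
    arrow::Status RegisterFromPython(const std::shared_ptr<arrow::DataType>& storage_type,
                                     PyObject* py_class) {
      std::shared_ptr<arrow::ExtensionType> ext_type;
      // Step 1: create the C++ ExtensionType holding a (borrowed) Python class.
      ARROW_RETURN_NOT_OK(arrow::py::PyExtensionType::FromClass(
          storage_type, arrow::py::PyExtensionName(), py_class, &ext_type));
      // Step 2: add it to the global extension type registry.
      return arrow::py::RegisterPyExtensionType(ext_type);
    }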
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.cc ADDED
@@ -0,0 +1,206 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/filesystem.h"
19
+ #include "arrow/util/logging.h"
20
+
21
+ namespace arrow {
22
+
23
+ using fs::FileInfo;
24
+ using fs::FileSelector;
25
+
26
+ namespace py {
27
+ namespace fs {
28
+
29
+ PyFileSystem::PyFileSystem(PyObject* handler, PyFileSystemVtable vtable)
30
+ : handler_(handler), vtable_(std::move(vtable)) {
31
+ Py_INCREF(handler);
32
+ }
33
+
34
+ PyFileSystem::~PyFileSystem() {}
35
+
36
+ std::shared_ptr<PyFileSystem> PyFileSystem::Make(PyObject* handler,
37
+ PyFileSystemVtable vtable) {
38
+ return std::make_shared<PyFileSystem>(handler, std::move(vtable));
39
+ }
40
+
41
+ std::string PyFileSystem::type_name() const {
42
+ std::string result;
43
+ auto st = SafeCallIntoPython([&]() -> Status {
44
+ vtable_.get_type_name(handler_.obj(), &result);
45
+ if (PyErr_Occurred()) {
46
+ PyErr_WriteUnraisable(handler_.obj());
47
+ }
48
+ return Status::OK();
49
+ });
50
+ ARROW_UNUSED(st);
51
+ return result;
52
+ }
53
+
54
+ bool PyFileSystem::Equals(const FileSystem& other) const {
55
+ bool result;
56
+ auto st = SafeCallIntoPython([&]() -> Status {
57
+ result = vtable_.equals(handler_.obj(), other);
58
+ if (PyErr_Occurred()) {
59
+ PyErr_WriteUnraisable(handler_.obj());
60
+ }
61
+ return Status::OK();
62
+ });
63
+ ARROW_UNUSED(st);
64
+ return result;
65
+ }
66
+
67
+ Result<FileInfo> PyFileSystem::GetFileInfo(const std::string& path) {
68
+ FileInfo info;
69
+
70
+ auto st = SafeCallIntoPython([&]() -> Status {
71
+ vtable_.get_file_info(handler_.obj(), path, &info);
72
+ return CheckPyError();
73
+ });
74
+ RETURN_NOT_OK(st);
75
+ return info;
76
+ }
77
+
78
+ Result<std::vector<FileInfo>> PyFileSystem::GetFileInfo(
79
+ const std::vector<std::string>& paths) {
80
+ std::vector<FileInfo> infos;
81
+
82
+ auto st = SafeCallIntoPython([&]() -> Status {
83
+ vtable_.get_file_info_vector(handler_.obj(), paths, &infos);
84
+ return CheckPyError();
85
+ });
86
+ RETURN_NOT_OK(st);
87
+ return infos;
88
+ }
89
+
90
+ Result<std::vector<FileInfo>> PyFileSystem::GetFileInfo(const FileSelector& select) {
91
+ std::vector<FileInfo> infos;
92
+
93
+ auto st = SafeCallIntoPython([&]() -> Status {
94
+ vtable_.get_file_info_selector(handler_.obj(), select, &infos);
95
+ return CheckPyError();
96
+ });
97
+ RETURN_NOT_OK(st);
98
+ return infos;
99
+ }
100
+
101
+ Status PyFileSystem::CreateDir(const std::string& path, bool recursive) {
102
+ return SafeCallIntoPython([&]() -> Status {
103
+ vtable_.create_dir(handler_.obj(), path, recursive);
104
+ return CheckPyError();
105
+ });
106
+ }
107
+
108
+ Status PyFileSystem::DeleteDir(const std::string& path) {
109
+ return SafeCallIntoPython([&]() -> Status {
110
+ vtable_.delete_dir(handler_.obj(), path);
111
+ return CheckPyError();
112
+ });
113
+ }
114
+
115
+ Status PyFileSystem::DeleteDirContents(const std::string& path, bool missing_dir_ok) {
116
+ return SafeCallIntoPython([&]() -> Status {
117
+ vtable_.delete_dir_contents(handler_.obj(), path, missing_dir_ok);
118
+ return CheckPyError();
119
+ });
120
+ }
121
+
122
+ Status PyFileSystem::DeleteRootDirContents() {
123
+ return SafeCallIntoPython([&]() -> Status {
124
+ vtable_.delete_root_dir_contents(handler_.obj());
125
+ return CheckPyError();
126
+ });
127
+ }
128
+
129
+ Status PyFileSystem::DeleteFile(const std::string& path) {
130
+ return SafeCallIntoPython([&]() -> Status {
131
+ vtable_.delete_file(handler_.obj(), path);
132
+ return CheckPyError();
133
+ });
134
+ }
135
+
136
+ Status PyFileSystem::Move(const std::string& src, const std::string& dest) {
137
+ return SafeCallIntoPython([&]() -> Status {
138
+ vtable_.move(handler_.obj(), src, dest);
139
+ return CheckPyError();
140
+ });
141
+ }
142
+
143
+ Status PyFileSystem::CopyFile(const std::string& src, const std::string& dest) {
144
+ return SafeCallIntoPython([&]() -> Status {
145
+ vtable_.copy_file(handler_.obj(), src, dest);
146
+ return CheckPyError();
147
+ });
148
+ }
149
+
150
+ Result<std::shared_ptr<io::InputStream>> PyFileSystem::OpenInputStream(
151
+ const std::string& path) {
152
+ std::shared_ptr<io::InputStream> stream;
153
+ auto st = SafeCallIntoPython([&]() -> Status {
154
+ vtable_.open_input_stream(handler_.obj(), path, &stream);
155
+ return CheckPyError();
156
+ });
157
+ RETURN_NOT_OK(st);
158
+ return stream;
159
+ }
160
+
161
+ Result<std::shared_ptr<io::RandomAccessFile>> PyFileSystem::OpenInputFile(
162
+ const std::string& path) {
163
+ std::shared_ptr<io::RandomAccessFile> stream;
164
+ auto st = SafeCallIntoPython([&]() -> Status {
165
+ vtable_.open_input_file(handler_.obj(), path, &stream);
166
+ return CheckPyError();
167
+ });
168
+ RETURN_NOT_OK(st);
169
+ return stream;
170
+ }
171
+
172
+ Result<std::shared_ptr<io::OutputStream>> PyFileSystem::OpenOutputStream(
173
+ const std::string& path, const std::shared_ptr<const KeyValueMetadata>& metadata) {
174
+ std::shared_ptr<io::OutputStream> stream;
175
+ auto st = SafeCallIntoPython([&]() -> Status {
176
+ vtable_.open_output_stream(handler_.obj(), path, metadata, &stream);
177
+ return CheckPyError();
178
+ });
179
+ RETURN_NOT_OK(st);
180
+ return stream;
181
+ }
182
+
183
+ Result<std::shared_ptr<io::OutputStream>> PyFileSystem::OpenAppendStream(
184
+ const std::string& path, const std::shared_ptr<const KeyValueMetadata>& metadata) {
185
+ std::shared_ptr<io::OutputStream> stream;
186
+ auto st = SafeCallIntoPython([&]() -> Status {
187
+ vtable_.open_append_stream(handler_.obj(), path, metadata, &stream);
188
+ return CheckPyError();
189
+ });
190
+ RETURN_NOT_OK(st);
191
+ return stream;
192
+ }
193
+
194
+ Result<std::string> PyFileSystem::NormalizePath(std::string path) {
195
+ std::string normalized;
196
+ auto st = SafeCallIntoPython([&]() -> Status {
197
+ vtable_.normalize_path(handler_.obj(), path, &normalized);
198
+ return CheckPyError();
199
+ });
200
+ RETURN_NOT_OK(st);
201
+ return normalized;
202
+ }
203
+
204
+ } // namespace fs
205
+ } // namespace py
206
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.h ADDED
@@ -0,0 +1,130 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "arrow/filesystem/filesystem.h"
25
+ #include "arrow/python/common.h"
26
+ #include "arrow/python/visibility.h"
27
+ #include "arrow/util/macros.h"
28
+
29
+ namespace arrow::py::fs {
30
+
31
+ class ARROW_PYTHON_EXPORT PyFileSystemVtable {
32
+ public:
33
+ std::function<void(PyObject*, std::string* out)> get_type_name;
34
+ std::function<bool(PyObject*, const arrow::fs::FileSystem& other)> equals;
35
+
36
+ std::function<void(PyObject*, const std::string& path, arrow::fs::FileInfo* out)>
37
+ get_file_info;
38
+ std::function<void(PyObject*, const std::vector<std::string>& paths,
39
+ std::vector<arrow::fs::FileInfo>* out)>
40
+ get_file_info_vector;
41
+ std::function<void(PyObject*, const arrow::fs::FileSelector&,
42
+ std::vector<arrow::fs::FileInfo>* out)>
43
+ get_file_info_selector;
44
+
45
+ std::function<void(PyObject*, const std::string& path, bool)> create_dir;
46
+ std::function<void(PyObject*, const std::string& path)> delete_dir;
47
+ std::function<void(PyObject*, const std::string& path, bool)> delete_dir_contents;
48
+ std::function<void(PyObject*)> delete_root_dir_contents;
49
+ std::function<void(PyObject*, const std::string& path)> delete_file;
50
+ std::function<void(PyObject*, const std::string& src, const std::string& dest)> move;
51
+ std::function<void(PyObject*, const std::string& src, const std::string& dest)>
52
+ copy_file;
53
+
54
+ std::function<void(PyObject*, const std::string& path,
55
+ std::shared_ptr<io::InputStream>* out)>
56
+ open_input_stream;
57
+ std::function<void(PyObject*, const std::string& path,
58
+ std::shared_ptr<io::RandomAccessFile>* out)>
59
+ open_input_file;
60
+ std::function<void(PyObject*, const std::string& path,
61
+ const std::shared_ptr<const KeyValueMetadata>&,
62
+ std::shared_ptr<io::OutputStream>* out)>
63
+ open_output_stream;
64
+ std::function<void(PyObject*, const std::string& path,
65
+ const std::shared_ptr<const KeyValueMetadata>&,
66
+ std::shared_ptr<io::OutputStream>* out)>
67
+ open_append_stream;
68
+
69
+ std::function<void(PyObject*, const std::string& path, std::string* out)>
70
+ normalize_path;
71
+ };
72
+
73
+ class ARROW_PYTHON_EXPORT PyFileSystem : public arrow::fs::FileSystem {
74
+ public:
75
+ PyFileSystem(PyObject* handler, PyFileSystemVtable vtable);
76
+ ~PyFileSystem() override;
77
+
78
+ static std::shared_ptr<PyFileSystem> Make(PyObject* handler, PyFileSystemVtable vtable);
79
+
80
+ std::string type_name() const override;
81
+
82
+ bool Equals(const FileSystem& other) const override;
83
+
84
+ /// \cond FALSE
85
+ using FileSystem::CreateDir;
86
+ using FileSystem::DeleteDirContents;
87
+ using FileSystem::GetFileInfo;
88
+ using FileSystem::OpenAppendStream;
89
+ using FileSystem::OpenOutputStream;
90
+ /// \endcond
91
+
92
+ Result<arrow::fs::FileInfo> GetFileInfo(const std::string& path) override;
93
+ Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
94
+ const std::vector<std::string>& paths) override;
95
+ Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
96
+ const arrow::fs::FileSelector& select) override;
97
+
98
+ Status CreateDir(const std::string& path, bool recursive) override;
99
+
100
+ Status DeleteDir(const std::string& path) override;
101
+ Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override;
102
+ Status DeleteRootDirContents() override;
103
+
104
+ Status DeleteFile(const std::string& path) override;
105
+
106
+ Status Move(const std::string& src, const std::string& dest) override;
107
+
108
+ Status CopyFile(const std::string& src, const std::string& dest) override;
109
+
110
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(
111
+ const std::string& path) override;
112
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
113
+ const std::string& path) override;
114
+ Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
115
+ const std::string& path,
116
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
117
+ Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
118
+ const std::string& path,
119
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
120
+
121
+ Result<std::string> NormalizePath(std::string path) override;
122
+
123
+ PyObject* handler() const { return handler_.obj(); }
124
+
125
+ private:
126
+ OwnedRefNoGIL handler_;
127
+ PyFileSystemVtable vtable_;
128
+ };
129
+
130
+ } // namespace arrow::py::fs
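For orientation, a small sketch (not from this commit) of how two PyFileSystemVtable slots could be populated with lambdas that call back into a Python handler; the handler method name delete_file and the literal type name are assumptions. Python exceptions raised inside a callback are left set and surfaced by the CheckPyError()/PyErr_Occurred() checks in PyFileSystem's methods.

    // Illustration only: partially fill a PyFileSystemVtable.
    arrow::py::fs::PyFileSystemVtable MakeSketchVtable() {
      arrow::py::fs::PyFileSystemVtable vtable;
      vtable.get_type_name = [](PyObject* /*handler*/, std::string* out) {
        *out = "py-sketch";  // a real vtable would query the handler instead
      };
      vtable.delete_file = [](PyObject* handler, const std::string& path) {
        // Assumed handler method name; errors stay set for the caller to report.
        PyObject* res = PyObject_CallMethod(handler, "delete_file", "s", path.c_str());
        Py_XDECREF(res);
      };
      return vtable;
    }

A filesystem would then be constructed with PyFileSystem::Make(handler, std::move(vtable)); any vtable slot that a FileSystem call can reach must be filled before use.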
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.cc ADDED
@@ -0,0 +1,388 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <signal.h>
19
+ #include <utility>
20
+
21
+ #include "arrow/python/flight.h"
22
+ #include "arrow/util/io_util.h"
23
+ #include "arrow/util/logging.h"
24
+
25
+ using arrow::flight::FlightPayload;
26
+
27
+ namespace arrow {
28
+ namespace py {
29
+ namespace flight {
30
+
31
+ const char* kPyServerMiddlewareName = "arrow.py_server_middleware";
32
+
33
+ PyServerAuthHandler::PyServerAuthHandler(PyObject* handler,
34
+ const PyServerAuthHandlerVtable& vtable)
35
+ : vtable_(vtable) {
36
+ Py_INCREF(handler);
37
+ handler_.reset(handler);
38
+ }
39
+
40
+ Status PyServerAuthHandler::Authenticate(arrow::flight::ServerAuthSender* outgoing,
41
+ arrow::flight::ServerAuthReader* incoming) {
42
+ return SafeCallIntoPython([=] {
43
+ const Status status = vtable_.authenticate(handler_.obj(), outgoing, incoming);
44
+ RETURN_NOT_OK(CheckPyError());
45
+ return status;
46
+ });
47
+ }
48
+
49
+ Status PyServerAuthHandler::IsValid(const std::string& token,
50
+ std::string* peer_identity) {
51
+ return SafeCallIntoPython([=] {
52
+ const Status status = vtable_.is_valid(handler_.obj(), token, peer_identity);
53
+ RETURN_NOT_OK(CheckPyError());
54
+ return status;
55
+ });
56
+ }
57
+
58
+ PyClientAuthHandler::PyClientAuthHandler(PyObject* handler,
59
+ const PyClientAuthHandlerVtable& vtable)
60
+ : vtable_(vtable) {
61
+ Py_INCREF(handler);
62
+ handler_.reset(handler);
63
+ }
64
+
65
+ Status PyClientAuthHandler::Authenticate(arrow::flight::ClientAuthSender* outgoing,
66
+ arrow::flight::ClientAuthReader* incoming) {
67
+ return SafeCallIntoPython([=] {
68
+ const Status status = vtable_.authenticate(handler_.obj(), outgoing, incoming);
69
+ RETURN_NOT_OK(CheckPyError());
70
+ return status;
71
+ });
72
+ }
73
+
74
+ Status PyClientAuthHandler::GetToken(std::string* token) {
75
+ return SafeCallIntoPython([=] {
76
+ const Status status = vtable_.get_token(handler_.obj(), token);
77
+ RETURN_NOT_OK(CheckPyError());
78
+ return status;
79
+ });
80
+ }
81
+
82
+ PyFlightServer::PyFlightServer(PyObject* server, const PyFlightServerVtable& vtable)
83
+ : vtable_(vtable) {
84
+ Py_INCREF(server);
85
+ server_.reset(server);
86
+ }
87
+
88
+ Status PyFlightServer::ListFlights(
89
+ const arrow::flight::ServerCallContext& context,
90
+ const arrow::flight::Criteria* criteria,
91
+ std::unique_ptr<arrow::flight::FlightListing>* listings) {
92
+ return SafeCallIntoPython([&] {
93
+ const Status status =
94
+ vtable_.list_flights(server_.obj(), context, criteria, listings);
95
+ RETURN_NOT_OK(CheckPyError());
96
+ return status;
97
+ });
98
+ }
99
+
100
+ Status PyFlightServer::GetFlightInfo(const arrow::flight::ServerCallContext& context,
101
+ const arrow::flight::FlightDescriptor& request,
102
+ std::unique_ptr<arrow::flight::FlightInfo>* info) {
103
+ return SafeCallIntoPython([&] {
104
+ const Status status = vtable_.get_flight_info(server_.obj(), context, request, info);
105
+ RETURN_NOT_OK(CheckPyError());
106
+ return status;
107
+ });
108
+ }
109
+
110
+ Status PyFlightServer::GetSchema(const arrow::flight::ServerCallContext& context,
111
+ const arrow::flight::FlightDescriptor& request,
112
+ std::unique_ptr<arrow::flight::SchemaResult>* result) {
113
+ return SafeCallIntoPython([&] {
114
+ const Status status = vtable_.get_schema(server_.obj(), context, request, result);
115
+ RETURN_NOT_OK(CheckPyError());
116
+ return status;
117
+ });
118
+ }
119
+
120
+ Status PyFlightServer::DoGet(const arrow::flight::ServerCallContext& context,
121
+ const arrow::flight::Ticket& request,
122
+ std::unique_ptr<arrow::flight::FlightDataStream>* stream) {
123
+ return SafeCallIntoPython([&] {
124
+ const Status status = vtable_.do_get(server_.obj(), context, request, stream);
125
+ RETURN_NOT_OK(CheckPyError());
126
+ return status;
127
+ });
128
+ }
129
+
130
+ Status PyFlightServer::DoPut(
131
+ const arrow::flight::ServerCallContext& context,
132
+ std::unique_ptr<arrow::flight::FlightMessageReader> reader,
133
+ std::unique_ptr<arrow::flight::FlightMetadataWriter> writer) {
134
+ return SafeCallIntoPython([&] {
135
+ const Status status =
136
+ vtable_.do_put(server_.obj(), context, std::move(reader), std::move(writer));
137
+ RETURN_NOT_OK(CheckPyError());
138
+ return status;
139
+ });
140
+ }
141
+
142
+ Status PyFlightServer::DoExchange(
143
+ const arrow::flight::ServerCallContext& context,
144
+ std::unique_ptr<arrow::flight::FlightMessageReader> reader,
145
+ std::unique_ptr<arrow::flight::FlightMessageWriter> writer) {
146
+ return SafeCallIntoPython([&] {
147
+ const Status status =
148
+ vtable_.do_exchange(server_.obj(), context, std::move(reader), std::move(writer));
149
+ RETURN_NOT_OK(CheckPyError());
150
+ return status;
151
+ });
152
+ }
153
+
154
+ Status PyFlightServer::DoAction(const arrow::flight::ServerCallContext& context,
155
+ const arrow::flight::Action& action,
156
+ std::unique_ptr<arrow::flight::ResultStream>* result) {
157
+ return SafeCallIntoPython([&] {
158
+ const Status status = vtable_.do_action(server_.obj(), context, action, result);
159
+ RETURN_NOT_OK(CheckPyError());
160
+ return status;
161
+ });
162
+ }
163
+
164
+ Status PyFlightServer::ListActions(const arrow::flight::ServerCallContext& context,
165
+ std::vector<arrow::flight::ActionType>* actions) {
166
+ return SafeCallIntoPython([&] {
167
+ const Status status = vtable_.list_actions(server_.obj(), context, actions);
168
+ RETURN_NOT_OK(CheckPyError());
169
+ return status;
170
+ });
171
+ }
172
+
173
+ Status PyFlightServer::ServeWithSignals() {
174
+ // Respect the current Python settings, i.e. only interrupt the server if there is
175
+ // an active signal handler for SIGINT and SIGTERM.
176
+ std::vector<int> signals;
177
+ for (const int signum : {SIGINT, SIGTERM}) {
178
+ ARROW_ASSIGN_OR_RAISE(auto handler, ::arrow::internal::GetSignalHandler(signum));
179
+ auto cb = handler.callback();
180
+ if (cb != SIG_DFL && cb != SIG_IGN) {
181
+ signals.push_back(signum);
182
+ }
183
+ }
184
+ RETURN_NOT_OK(SetShutdownOnSignals(signals));
185
+
186
+ // Serve until we got told to shutdown or a signal interrupted us
187
+ RETURN_NOT_OK(Serve());
188
+ int signum = GotSignal();
189
+ if (signum != 0) {
190
+ // Issue the signal again with Python's signal handlers restored
191
+ PyAcquireGIL lock;
192
+ raise(signum);
193
+ // XXX Ideally we would loop and serve again if no exception was raised.
194
+ // Unfortunately, gRPC will return immediately if Serve() is called again.
195
+ ARROW_UNUSED(PyErr_CheckSignals());
196
+ }
197
+
198
+ return Status::OK();
199
+ }
200
+
201
+ PyFlightResultStream::PyFlightResultStream(PyObject* generator,
202
+ PyFlightResultStreamCallback callback)
203
+ : callback_(callback) {
204
+ Py_INCREF(generator);
205
+ generator_.reset(generator);
206
+ }
207
+
208
+ arrow::Result<std::unique_ptr<arrow::flight::Result>> PyFlightResultStream::Next() {
209
+ return SafeCallIntoPython(
210
+ [=]() -> arrow::Result<std::unique_ptr<arrow::flight::Result>> {
211
+ std::unique_ptr<arrow::flight::Result> result;
212
+ const Status status = callback_(generator_.obj(), &result);
213
+ RETURN_NOT_OK(CheckPyError());
214
+ RETURN_NOT_OK(status);
215
+ return result;
216
+ });
217
+ }
218
+
219
+ PyFlightDataStream::PyFlightDataStream(
220
+ PyObject* data_source, std::unique_ptr<arrow::flight::FlightDataStream> stream)
221
+ : stream_(std::move(stream)) {
222
+ Py_INCREF(data_source);
223
+ data_source_.reset(data_source);
224
+ }
225
+
226
+ std::shared_ptr<Schema> PyFlightDataStream::schema() { return stream_->schema(); }
227
+
228
+ arrow::Result<FlightPayload> PyFlightDataStream::GetSchemaPayload() {
229
+ return stream_->GetSchemaPayload();
230
+ }
231
+
232
+ arrow::Result<FlightPayload> PyFlightDataStream::Next() { return stream_->Next(); }
233
+
234
+ PyGeneratorFlightDataStream::PyGeneratorFlightDataStream(
235
+ PyObject* generator, std::shared_ptr<arrow::Schema> schema,
236
+ PyGeneratorFlightDataStreamCallback callback, const ipc::IpcWriteOptions& options)
237
+ : schema_(schema), mapper_(*schema_), options_(options), callback_(callback) {
238
+ Py_INCREF(generator);
239
+ generator_.reset(generator);
240
+ }
241
+
242
+ std::shared_ptr<Schema> PyGeneratorFlightDataStream::schema() { return schema_; }
243
+
244
+ arrow::Result<FlightPayload> PyGeneratorFlightDataStream::GetSchemaPayload() {
245
+ FlightPayload payload;
246
+ RETURN_NOT_OK(ipc::GetSchemaPayload(*schema_, options_, mapper_, &payload.ipc_message));
247
+ return payload;
248
+ }
249
+
250
+ arrow::Result<FlightPayload> PyGeneratorFlightDataStream::Next() {
251
+ return SafeCallIntoPython([=]() -> arrow::Result<FlightPayload> {
252
+ FlightPayload payload;
253
+ const Status status = callback_(generator_.obj(), &payload);
254
+ RETURN_NOT_OK(CheckPyError());
255
+ RETURN_NOT_OK(status);
256
+ return payload;
257
+ });
258
+ }
259
+
260
+ // Flight Server Middleware
261
+
262
+ PyServerMiddlewareFactory::PyServerMiddlewareFactory(PyObject* factory,
263
+ StartCallCallback start_call)
264
+ : start_call_(start_call) {
265
+ Py_INCREF(factory);
266
+ factory_.reset(factory);
267
+ }
268
+
269
+ Status PyServerMiddlewareFactory::StartCall(
270
+ const arrow::flight::CallInfo& info,
271
+ const arrow::flight::CallHeaders& incoming_headers,
272
+ std::shared_ptr<arrow::flight::ServerMiddleware>* middleware) {
273
+ return SafeCallIntoPython([&] {
274
+ const Status status = start_call_(factory_.obj(), info, incoming_headers, middleware);
275
+ RETURN_NOT_OK(CheckPyError());
276
+ return status;
277
+ });
278
+ }
279
+
280
+ PyServerMiddleware::PyServerMiddleware(PyObject* middleware, Vtable vtable)
281
+ : vtable_(vtable) {
282
+ Py_INCREF(middleware);
283
+ middleware_.reset(middleware);
284
+ }
285
+
286
+ void PyServerMiddleware::SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) {
287
+ const Status& status = SafeCallIntoPython([&] {
288
+ const Status status = vtable_.sending_headers(middleware_.obj(), outgoing_headers);
289
+ RETURN_NOT_OK(CheckPyError());
290
+ return status;
291
+ });
292
+
293
+ ARROW_WARN_NOT_OK(status, "Python server middleware failed in SendingHeaders");
294
+ }
295
+
296
+ void PyServerMiddleware::CallCompleted(const Status& call_status) {
297
+ const Status& status = SafeCallIntoPython([&] {
298
+ const Status status = vtable_.call_completed(middleware_.obj(), call_status);
299
+ RETURN_NOT_OK(CheckPyError());
300
+ return status;
301
+ });
302
+
303
+ ARROW_WARN_NOT_OK(status, "Python server middleware failed in CallCompleted");
304
+ }
305
+
306
+ std::string PyServerMiddleware::name() const { return kPyServerMiddlewareName; }
307
+
308
+ PyObject* PyServerMiddleware::py_object() const { return middleware_.obj(); }
309
+
310
+ // Flight Client Middleware
311
+
312
+ PyClientMiddlewareFactory::PyClientMiddlewareFactory(PyObject* factory,
313
+ StartCallCallback start_call)
314
+ : start_call_(start_call) {
315
+ Py_INCREF(factory);
316
+ factory_.reset(factory);
317
+ }
318
+
319
+ void PyClientMiddlewareFactory::StartCall(
320
+ const arrow::flight::CallInfo& info,
321
+ std::unique_ptr<arrow::flight::ClientMiddleware>* middleware) {
322
+ const Status& status = SafeCallIntoPython([&] {
323
+ const Status status = start_call_(factory_.obj(), info, middleware);
324
+ RETURN_NOT_OK(CheckPyError());
325
+ return status;
326
+ });
327
+
328
+ ARROW_WARN_NOT_OK(status, "Python client middleware failed in StartCall");
329
+ }
330
+
331
+ PyClientMiddleware::PyClientMiddleware(PyObject* middleware, Vtable vtable)
332
+ : vtable_(vtable) {
333
+ Py_INCREF(middleware);
334
+ middleware_.reset(middleware);
335
+ }
336
+
337
+ void PyClientMiddleware::SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) {
338
+ const Status& status = SafeCallIntoPython([&] {
339
+ const Status status = vtable_.sending_headers(middleware_.obj(), outgoing_headers);
340
+ RETURN_NOT_OK(CheckPyError());
341
+ return status;
342
+ });
343
+
344
+ ARROW_WARN_NOT_OK(status, "Python client middleware failed in SendingHeaders");
345
+ }
346
+
347
+ void PyClientMiddleware::ReceivedHeaders(
348
+ const arrow::flight::CallHeaders& incoming_headers) {
349
+ const Status& status = SafeCallIntoPython([&] {
350
+ const Status status = vtable_.received_headers(middleware_.obj(), incoming_headers);
351
+ RETURN_NOT_OK(CheckPyError());
352
+ return status;
353
+ });
354
+
355
+ ARROW_WARN_NOT_OK(status, "Python client middleware failed in ReceivedHeaders");
356
+ }
357
+
358
+ void PyClientMiddleware::CallCompleted(const Status& call_status) {
359
+ const Status& status = SafeCallIntoPython([&] {
360
+ const Status status = vtable_.call_completed(middleware_.obj(), call_status);
361
+ RETURN_NOT_OK(CheckPyError());
362
+ return status;
363
+ });
364
+
365
+ ARROW_WARN_NOT_OK(status, "Python client middleware failed in CallCompleted");
366
+ }
367
+
368
+ Status CreateFlightInfo(const std::shared_ptr<arrow::Schema>& schema,
369
+ const arrow::flight::FlightDescriptor& descriptor,
370
+ const std::vector<arrow::flight::FlightEndpoint>& endpoints,
371
+ int64_t total_records, int64_t total_bytes,
372
+ std::unique_ptr<arrow::flight::FlightInfo>* out) {
373
+ ARROW_ASSIGN_OR_RAISE(auto result,
374
+ arrow::flight::FlightInfo::Make(*schema, descriptor, endpoints,
375
+ total_records, total_bytes));
376
+ *out = std::unique_ptr<arrow::flight::FlightInfo>(
377
+ new arrow::flight::FlightInfo(std::move(result)));
378
+ return Status::OK();
379
+ }
380
+
381
+ Status CreateSchemaResult(const std::shared_ptr<arrow::Schema>& schema,
382
+ std::unique_ptr<arrow::flight::SchemaResult>* out) {
383
+ return arrow::flight::SchemaResult::Make(*schema).Value(out);
384
+ }
385
+
386
+ } // namespace flight
387
+ } // namespace py
388
+ } // namespace arrow
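The `CreateFlightInfo` helper defined above wraps `arrow::flight::FlightInfo::Make` behind a Status-returning out-parameter API. A minimal caller-side sketch, not part of this diff (the schema, descriptor, and the `MakeExampleInfo` name are illustrative assumptions), might look like:

// Illustrative only: build a FlightInfo through the helper declared in arrow/python/flight.h.
#include <memory>
#include <vector>
#include "arrow/api.h"
#include "arrow/flight/api.h"
#include "arrow/python/flight.h"

arrow::Status MakeExampleInfo(std::unique_ptr<arrow::flight::FlightInfo>* out) {
  // Hypothetical schema and descriptor, purely for illustration.
  auto schema = arrow::schema({arrow::field("ints", arrow::int32())});
  auto descriptor = arrow::flight::FlightDescriptor::Path({"example-path"});
  std::vector<arrow::flight::FlightEndpoint> endpoints;  // empty in this sketch
  // -1 is commonly passed when the record/byte counts are unknown.
  return arrow::py::flight::CreateFlightInfo(schema, descriptor, endpoints,
                                             /*total_records=*/-1, /*total_bytes=*/-1, out);
}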
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.h ADDED
@@ -0,0 +1,350 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "arrow/flight/api.h"
25
+ #include "arrow/ipc/dictionary.h"
26
+ #include "arrow/python/common.h"
27
+
28
+ #if defined(_WIN32) || defined(__CYGWIN__) // Windows
29
+ #if defined(_MSC_VER)
30
+ #pragma warning(disable : 4251)
31
+ #else
32
+ #pragma GCC diagnostic ignored "-Wattributes"
33
+ #endif
34
+
35
+ #ifdef ARROW_PYTHON_STATIC
36
+ #define ARROW_PYFLIGHT_EXPORT
37
+ #elif defined(ARROW_PYFLIGHT_EXPORTING)
38
+ #define ARROW_PYFLIGHT_EXPORT __declspec(dllexport)
39
+ #else
40
+ #define ARROW_PYFLIGHT_EXPORT __declspec(dllimport)
41
+ #endif
42
+
43
+ #else // Not Windows
44
+ #ifndef ARROW_PYFLIGHT_EXPORT
45
+ #define ARROW_PYFLIGHT_EXPORT __attribute__((visibility("default")))
46
+ #endif
47
+ #endif // Non-Windows
48
+
49
+ namespace arrow {
50
+
51
+ namespace py {
52
+
53
+ namespace flight {
54
+
55
+ ARROW_PYFLIGHT_EXPORT
56
+ extern const char* kPyServerMiddlewareName;
57
+
58
+ /// \brief A table of function pointers for calling from C++ into
59
+ /// Python.
60
+ class ARROW_PYFLIGHT_EXPORT PyFlightServerVtable {
61
+ public:
62
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
63
+ const arrow::flight::Criteria*,
64
+ std::unique_ptr<arrow::flight::FlightListing>*)>
65
+ list_flights;
66
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
67
+ const arrow::flight::FlightDescriptor&,
68
+ std::unique_ptr<arrow::flight::FlightInfo>*)>
69
+ get_flight_info;
70
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
71
+ const arrow::flight::FlightDescriptor&,
72
+ std::unique_ptr<arrow::flight::SchemaResult>*)>
73
+ get_schema;
74
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
75
+ const arrow::flight::Ticket&,
76
+ std::unique_ptr<arrow::flight::FlightDataStream>*)>
77
+ do_get;
78
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
79
+ std::unique_ptr<arrow::flight::FlightMessageReader>,
80
+ std::unique_ptr<arrow::flight::FlightMetadataWriter>)>
81
+ do_put;
82
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
83
+ std::unique_ptr<arrow::flight::FlightMessageReader>,
84
+ std::unique_ptr<arrow::flight::FlightMessageWriter>)>
85
+ do_exchange;
86
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
87
+ const arrow::flight::Action&,
88
+ std::unique_ptr<arrow::flight::ResultStream>*)>
89
+ do_action;
90
+ std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
91
+ std::vector<arrow::flight::ActionType>*)>
92
+ list_actions;
93
+ };
94
+
95
+ class ARROW_PYFLIGHT_EXPORT PyServerAuthHandlerVtable {
96
+ public:
97
+ std::function<Status(PyObject*, arrow::flight::ServerAuthSender*,
98
+ arrow::flight::ServerAuthReader*)>
99
+ authenticate;
100
+ std::function<Status(PyObject*, const std::string&, std::string*)> is_valid;
101
+ };
102
+
103
+ class ARROW_PYFLIGHT_EXPORT PyClientAuthHandlerVtable {
104
+ public:
105
+ std::function<Status(PyObject*, arrow::flight::ClientAuthSender*,
106
+ arrow::flight::ClientAuthReader*)>
107
+ authenticate;
108
+ std::function<Status(PyObject*, std::string*)> get_token;
109
+ };
110
+
111
+ /// \brief A helper to implement an auth mechanism in Python.
112
+ class ARROW_PYFLIGHT_EXPORT PyServerAuthHandler
113
+ : public arrow::flight::ServerAuthHandler {
114
+ public:
115
+ explicit PyServerAuthHandler(PyObject* handler,
116
+ const PyServerAuthHandlerVtable& vtable);
117
+ Status Authenticate(arrow::flight::ServerAuthSender* outgoing,
118
+ arrow::flight::ServerAuthReader* incoming) override;
119
+ Status IsValid(const std::string& token, std::string* peer_identity) override;
120
+
121
+ private:
122
+ OwnedRefNoGIL handler_;
123
+ PyServerAuthHandlerVtable vtable_;
124
+ };
125
+
126
+ /// \brief A helper to implement an auth mechanism in Python.
127
+ class ARROW_PYFLIGHT_EXPORT PyClientAuthHandler
128
+ : public arrow::flight::ClientAuthHandler {
129
+ public:
130
+ explicit PyClientAuthHandler(PyObject* handler,
131
+ const PyClientAuthHandlerVtable& vtable);
132
+ Status Authenticate(arrow::flight::ClientAuthSender* outgoing,
133
+ arrow::flight::ClientAuthReader* incoming) override;
134
+ Status GetToken(std::string* token) override;
135
+
136
+ private:
137
+ OwnedRefNoGIL handler_;
138
+ PyClientAuthHandlerVtable vtable_;
139
+ };
140
+
141
+ class ARROW_PYFLIGHT_EXPORT PyFlightServer : public arrow::flight::FlightServerBase {
142
+ public:
143
+ explicit PyFlightServer(PyObject* server, const PyFlightServerVtable& vtable);
144
+
145
+ // Like Serve(), but set up signals and invoke Python signal handlers
146
+ // if necessary. This function may return with a Python exception set.
147
+ Status ServeWithSignals();
148
+
149
+ Status ListFlights(const arrow::flight::ServerCallContext& context,
150
+ const arrow::flight::Criteria* criteria,
151
+ std::unique_ptr<arrow::flight::FlightListing>* listings) override;
152
+ Status GetFlightInfo(const arrow::flight::ServerCallContext& context,
153
+ const arrow::flight::FlightDescriptor& request,
154
+ std::unique_ptr<arrow::flight::FlightInfo>* info) override;
155
+ Status GetSchema(const arrow::flight::ServerCallContext& context,
156
+ const arrow::flight::FlightDescriptor& request,
157
+ std::unique_ptr<arrow::flight::SchemaResult>* result) override;
158
+ Status DoGet(const arrow::flight::ServerCallContext& context,
159
+ const arrow::flight::Ticket& request,
160
+ std::unique_ptr<arrow::flight::FlightDataStream>* stream) override;
161
+ Status DoPut(const arrow::flight::ServerCallContext& context,
162
+ std::unique_ptr<arrow::flight::FlightMessageReader> reader,
163
+ std::unique_ptr<arrow::flight::FlightMetadataWriter> writer) override;
164
+ Status DoExchange(const arrow::flight::ServerCallContext& context,
165
+ std::unique_ptr<arrow::flight::FlightMessageReader> reader,
166
+ std::unique_ptr<arrow::flight::FlightMessageWriter> writer) override;
167
+ Status DoAction(const arrow::flight::ServerCallContext& context,
168
+ const arrow::flight::Action& action,
169
+ std::unique_ptr<arrow::flight::ResultStream>* result) override;
170
+ Status ListActions(const arrow::flight::ServerCallContext& context,
171
+ std::vector<arrow::flight::ActionType>* actions) override;
172
+
173
+ private:
174
+ OwnedRefNoGIL server_;
175
+ PyFlightServerVtable vtable_;
176
+ };
177
+
178
+ /// \brief A callback that obtains the next result from a Flight action.
179
+ typedef std::function<Status(PyObject*, std::unique_ptr<arrow::flight::Result>*)>
180
+ PyFlightResultStreamCallback;
181
+
182
+ /// \brief A ResultStream built around a Python callback.
183
+ class ARROW_PYFLIGHT_EXPORT PyFlightResultStream : public arrow::flight::ResultStream {
184
+ public:
185
+ /// \brief Construct a FlightResultStream from a Python object and callback.
186
+ /// Must only be called while holding the GIL.
187
+ explicit PyFlightResultStream(PyObject* generator,
188
+ PyFlightResultStreamCallback callback);
189
+ arrow::Result<std::unique_ptr<arrow::flight::Result>> Next() override;
190
+
191
+ private:
192
+ OwnedRefNoGIL generator_;
193
+ PyFlightResultStreamCallback callback_;
194
+ };
195
+
196
+ /// \brief A wrapper around a FlightDataStream that keeps alive a
197
+ /// Python object backing it.
198
+ class ARROW_PYFLIGHT_EXPORT PyFlightDataStream : public arrow::flight::FlightDataStream {
199
+ public:
200
+ /// \brief Construct a FlightDataStream from a Python object and underlying stream.
201
+ /// Must only be called while holding the GIL.
202
+ explicit PyFlightDataStream(PyObject* data_source,
203
+ std::unique_ptr<arrow::flight::FlightDataStream> stream);
204
+
205
+ std::shared_ptr<Schema> schema() override;
206
+ arrow::Result<arrow::flight::FlightPayload> GetSchemaPayload() override;
207
+ arrow::Result<arrow::flight::FlightPayload> Next() override;
208
+
209
+ private:
210
+ OwnedRefNoGIL data_source_;
211
+ std::unique_ptr<arrow::flight::FlightDataStream> stream_;
212
+ };
213
+
214
+ class ARROW_PYFLIGHT_EXPORT PyServerMiddlewareFactory
215
+ : public arrow::flight::ServerMiddlewareFactory {
216
+ public:
217
+ /// \brief A callback to create the middleware instance in Python
218
+ typedef std::function<Status(
219
+ PyObject*, const arrow::flight::CallInfo& info,
220
+ const arrow::flight::CallHeaders& incoming_headers,
221
+ std::shared_ptr<arrow::flight::ServerMiddleware>* middleware)>
222
+ StartCallCallback;
223
+
224
+ /// \brief Must only be called while holding the GIL.
225
+ explicit PyServerMiddlewareFactory(PyObject* factory, StartCallCallback start_call);
226
+
227
+ Status StartCall(const arrow::flight::CallInfo& info,
228
+ const arrow::flight::CallHeaders& incoming_headers,
229
+ std::shared_ptr<arrow::flight::ServerMiddleware>* middleware) override;
230
+
231
+ private:
232
+ OwnedRefNoGIL factory_;
233
+ StartCallCallback start_call_;
234
+ };
235
+
236
+ class ARROW_PYFLIGHT_EXPORT PyServerMiddleware : public arrow::flight::ServerMiddleware {
237
+ public:
238
+ typedef std::function<Status(PyObject*,
239
+ arrow::flight::AddCallHeaders* outgoing_headers)>
240
+ SendingHeadersCallback;
241
+ typedef std::function<Status(PyObject*, const Status& status)> CallCompletedCallback;
242
+
243
+ struct Vtable {
244
+ SendingHeadersCallback sending_headers;
245
+ CallCompletedCallback call_completed;
246
+ };
247
+
248
+ /// \brief Must only be called while holding the GIL.
249
+ explicit PyServerMiddleware(PyObject* middleware, Vtable vtable);
250
+
251
+ void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override;
252
+ void CallCompleted(const Status& status) override;
253
+ std::string name() const override;
254
+ /// \brief Get the underlying Python object.
255
+ PyObject* py_object() const;
256
+
257
+ private:
258
+ OwnedRefNoGIL middleware_;
259
+ Vtable vtable_;
260
+ };
261
+
262
+ class ARROW_PYFLIGHT_EXPORT PyClientMiddlewareFactory
263
+ : public arrow::flight::ClientMiddlewareFactory {
264
+ public:
265
+ /// \brief A callback to create the middleware instance in Python
266
+ typedef std::function<Status(
267
+ PyObject*, const arrow::flight::CallInfo& info,
268
+ std::unique_ptr<arrow::flight::ClientMiddleware>* middleware)>
269
+ StartCallCallback;
270
+
271
+ /// \brief Must only be called while holding the GIL.
272
+ explicit PyClientMiddlewareFactory(PyObject* factory, StartCallCallback start_call);
273
+
274
+ void StartCall(const arrow::flight::CallInfo& info,
275
+ std::unique_ptr<arrow::flight::ClientMiddleware>* middleware) override;
276
+
277
+ private:
278
+ OwnedRefNoGIL factory_;
279
+ StartCallCallback start_call_;
280
+ };
281
+
282
+ class ARROW_PYFLIGHT_EXPORT PyClientMiddleware : public arrow::flight::ClientMiddleware {
283
+ public:
284
+ typedef std::function<Status(PyObject*,
285
+ arrow::flight::AddCallHeaders* outgoing_headers)>
286
+ SendingHeadersCallback;
287
+ typedef std::function<Status(PyObject*,
288
+ const arrow::flight::CallHeaders& incoming_headers)>
289
+ ReceivedHeadersCallback;
290
+ typedef std::function<Status(PyObject*, const Status& status)> CallCompletedCallback;
291
+
292
+ struct Vtable {
293
+ SendingHeadersCallback sending_headers;
294
+ ReceivedHeadersCallback received_headers;
295
+ CallCompletedCallback call_completed;
296
+ };
297
+
298
+ /// \brief Must only be called while holding the GIL.
299
+ explicit PyClientMiddleware(PyObject* middleware, Vtable vtable);
300
+
301
+ void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override;
302
+ void ReceivedHeaders(const arrow::flight::CallHeaders& incoming_headers) override;
303
+ void CallCompleted(const Status& status) override;
304
+
305
+ private:
306
+ OwnedRefNoGIL middleware_;
307
+ Vtable vtable_;
308
+ };
309
+
310
+ /// \brief A callback that obtains the next payload from a Flight data stream.
311
+ typedef std::function<Status(PyObject*, arrow::flight::FlightPayload*)>
312
+ PyGeneratorFlightDataStreamCallback;
313
+
314
+ /// \brief A FlightDataStream built around a Python callback.
315
+ class ARROW_PYFLIGHT_EXPORT PyGeneratorFlightDataStream
316
+ : public arrow::flight::FlightDataStream {
317
+ public:
318
+ /// \brief Construct a FlightDataStream from a Python generator and callback.
319
+ /// Must only be called while holding the GIL.
320
+ explicit PyGeneratorFlightDataStream(PyObject* generator,
321
+ std::shared_ptr<arrow::Schema> schema,
322
+ PyGeneratorFlightDataStreamCallback callback,
323
+ const ipc::IpcWriteOptions& options);
324
+ std::shared_ptr<Schema> schema() override;
325
+ arrow::Result<arrow::flight::FlightPayload> GetSchemaPayload() override;
326
+ arrow::Result<arrow::flight::FlightPayload> Next() override;
327
+
328
+ private:
329
+ OwnedRefNoGIL generator_;
330
+ std::shared_ptr<arrow::Schema> schema_;
331
+ ipc::DictionaryFieldMapper mapper_;
332
+ ipc::IpcWriteOptions options_;
333
+ PyGeneratorFlightDataStreamCallback callback_;
334
+ };
335
+
336
+ ARROW_PYFLIGHT_EXPORT
337
+ Status CreateFlightInfo(const std::shared_ptr<arrow::Schema>& schema,
338
+ const arrow::flight::FlightDescriptor& descriptor,
339
+ const std::vector<arrow::flight::FlightEndpoint>& endpoints,
340
+ int64_t total_records, int64_t total_bytes,
341
+ std::unique_ptr<arrow::flight::FlightInfo>* out);
342
+
343
+ /// \brief Create a SchemaResult from schema.
344
+ ARROW_PYFLIGHT_EXPORT
345
+ Status CreateSchemaResult(const std::shared_ptr<arrow::Schema>& schema,
346
+ std::unique_ptr<arrow::flight::SchemaResult>* out);
347
+
348
+ } // namespace flight
349
+ } // namespace py
350
+ } // namespace arrow
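The classes declared in this header share one pattern: an `OwnedRefNoGIL` keeps the wrapped Python object alive, and a `Vtable` of `std::function` callbacks routes each C++ virtual method back into the Python/Cython layer. A hedged sketch of how such a vtable might be populated, assuming hypothetical callback names (`ExampleSendingHeaders`, `ExampleCallCompleted`, `MakeExampleMiddleware` are not part of pyarrow):

// Illustrative only: populating PyServerMiddleware::Vtable with callbacks.
#include <memory>
#include "arrow/python/flight.h"

namespace {

arrow::Status ExampleSendingHeaders(PyObject* middleware,
                                    arrow::flight::AddCallHeaders* outgoing_headers) {
  // A real binding would call a method on the wrapped Python middleware object here.
  return arrow::Status::OK();
}

arrow::Status ExampleCallCompleted(PyObject* middleware, const arrow::Status& call_status) {
  return arrow::Status::OK();
}

}  // namespace

std::shared_ptr<arrow::flight::ServerMiddleware> MakeExampleMiddleware(PyObject* obj) {
  arrow::py::flight::PyServerMiddleware::Vtable vtable;
  vtable.sending_headers = ExampleSendingHeaders;
  vtable.call_completed = ExampleCallCompleted;
  // The constructor Py_INCREFs `obj`, so it must be called while holding the GIL.
  return std::make_shared<arrow::py::flight::PyServerMiddleware>(obj, vtable);
}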
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.cc ADDED
@@ -0,0 +1,530 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <cstdlib>
19
+ #include <memory>
20
+ #include <utility>
21
+
22
+ #include "arrow/array.h"
23
+ #include "arrow/chunked_array.h"
24
+ #include "arrow/datum.h"
25
+ #include "arrow/extension_type.h"
26
+ #include "arrow/ipc/json_simple.h"
27
+ #include "arrow/python/gdb.h"
28
+ #include "arrow/record_batch.h"
29
+ #include "arrow/scalar.h"
30
+ #include "arrow/table.h"
31
+ #include "arrow/type.h"
32
+ #include "arrow/util/debug.h"
33
+ #include "arrow/util/decimal.h"
34
+ #include "arrow/util/key_value_metadata.h"
35
+ #include "arrow/util/logging.h"
36
+ #include "arrow/util/macros.h"
37
+
38
+ namespace arrow {
39
+
40
+ using ipc::internal::json::ArrayFromJSON;
41
+ using ipc::internal::json::ChunkedArrayFromJSON;
42
+ using ipc::internal::json::ScalarFromJSON;
43
+
44
+ namespace gdb {
45
+
46
+ // Add a nested `arrow` namespace to exercise type lookup from GDB (ARROW-15652)
47
+ namespace arrow {
48
+ void DummyFunction() {}
49
+ } // namespace arrow
50
+
51
+ namespace {
52
+
53
+ class CustomStatusDetail : public StatusDetail {
54
+ public:
55
+ const char* type_id() const override { return "custom-detail-id"; }
56
+ std::string ToString() const override { return "This is a detail"; }
57
+ };
58
+
59
+ class UuidType : public ExtensionType {
60
+ public:
61
+ UuidType() : ExtensionType(fixed_size_binary(16)) {}
62
+
63
+ std::string extension_name() const override { return "uuid"; }
64
+
65
+ bool ExtensionEquals(const ExtensionType& other) const override {
66
+ return (other.extension_name() == this->extension_name());
67
+ }
68
+
69
+ std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override {
70
+ return std::make_shared<ExtensionArray>(data);
71
+ }
72
+
73
+ Result<std::shared_ptr<DataType>> Deserialize(
74
+ std::shared_ptr<DataType> storage_type,
75
+ const std::string& serialized) const override {
76
+ return Status::NotImplemented("");
77
+ }
78
+
79
+ std::string Serialize() const override { return "uuid-serialized"; }
80
+ };
81
+
82
+ std::shared_ptr<Array> SliceArrayFromJSON(const std::shared_ptr<DataType>& ty,
83
+ std::string_view json, int64_t offset = 0,
84
+ int64_t length = -1) {
85
+ auto array = *ArrayFromJSON(ty, json);
86
+ if (length != -1) {
87
+ return array->Slice(offset, length);
88
+ } else {
89
+ return array->Slice(offset);
90
+ }
91
+ }
92
+
93
+ } // namespace
94
+
95
+ void TestSession() {
96
+ // We define local variables for all types for which we want to test
97
+ // pretty-printing.
98
+ // Then, at the end of this function, we trap to the debugger, so that
99
+ // test instrumentation can print values from this frame by interacting
100
+ // with the debugger.
101
+ // The test instrumentation is in pyarrow/tests/test_gdb.py
102
+
103
+ #ifdef __clang__
104
+ _Pragma("clang diagnostic push");
105
+ _Pragma("clang diagnostic ignored \"-Wunused-variable\"");
106
+ #elif defined(__GNUC__)
107
+ _Pragma("GCC diagnostic push");
108
+ _Pragma("GCC diagnostic ignored \"-Wunused-variable\"");
109
+ #endif
110
+
111
+ arrow::DummyFunction();
112
+
113
+ // Status & Result
114
+ auto ok_status = Status::OK();
115
+ auto error_status = Status::IOError("This is an error");
116
+ auto error_detail_status =
117
+ error_status.WithDetail(std::make_shared<CustomStatusDetail>());
118
+ auto ok_result = Result<int>(42);
119
+ auto error_result = Result<int>(error_status);
120
+ auto error_detail_result = Result<int>(error_detail_status);
121
+
122
+ // String views
123
+ std::string_view string_view_abc{"abc"};
124
+ std::string special_chars = std::string("foo\"bar") + '\x00' + "\r\n\t\x1f";
125
+ std::string_view string_view_special_chars(special_chars);
126
+
127
+ // Buffers
128
+ Buffer buffer_null{nullptr, 0};
129
+ Buffer buffer_abc{string_view_abc};
130
+ Buffer buffer_special_chars{string_view_special_chars};
131
+ char mutable_array[3] = {'a', 'b', 'c'};
132
+ MutableBuffer buffer_mutable{reinterpret_cast<uint8_t*>(mutable_array), 3};
133
+ auto heap_buffer = std::make_shared<Buffer>(string_view_abc);
134
+ auto heap_buffer_mutable = *AllocateBuffer(buffer_abc.size());
135
+ memcpy(heap_buffer_mutable->mutable_data(), buffer_abc.data(), buffer_abc.size());
136
+
137
+ // KeyValueMetadata
138
+ auto empty_metadata = key_value_metadata({}, {});
139
+ auto metadata = key_value_metadata(
140
+ {"key_text", "key_binary"}, {"some value", std::string("z") + '\x00' + "\x1f\xff"});
141
+
142
+ // Decimals
143
+ Decimal128 decimal128_zero{};
144
+ Decimal128 decimal128_pos{"98765432109876543210987654321098765432"};
145
+ Decimal128 decimal128_neg{"-98765432109876543210987654321098765432"};
146
+ BasicDecimal128 basic_decimal128_zero{};
147
+ BasicDecimal128 basic_decimal128_pos{decimal128_pos.native_endian_array()};
148
+ BasicDecimal128 basic_decimal128_neg{decimal128_neg.native_endian_array()};
149
+ Decimal256 decimal256_zero{};
150
+ Decimal256 decimal256_pos{
151
+ "9876543210987654321098765432109876543210987654321098765432109876543210987654"};
152
+ Decimal256 decimal256_neg{
153
+ "-9876543210987654321098765432109876543210987654321098765432109876543210987654"};
154
+ BasicDecimal256 basic_decimal256_zero{};
155
+ BasicDecimal256 basic_decimal256_pos{decimal256_pos.native_endian_array()};
156
+ BasicDecimal256 basic_decimal256_neg{decimal256_neg.native_endian_array()};
157
+
158
+ // Data types
159
+ NullType null_type;
160
+ auto heap_null_type = null();
161
+ BooleanType bool_type;
162
+ auto heap_bool_type = boolean();
163
+
164
+ Date32Type date32_type;
165
+ Date64Type date64_type;
166
+ Time32Type time_type_s(TimeUnit::SECOND);
167
+ Time32Type time_type_ms(TimeUnit::MILLI);
168
+ Time64Type time_type_us(TimeUnit::MICRO);
169
+ Time64Type time_type_ns(TimeUnit::NANO);
170
+ auto heap_time_type_ns = time64(TimeUnit::NANO);
171
+
172
+ TimestampType timestamp_type_s(TimeUnit::SECOND);
173
+ TimestampType timestamp_type_ms_timezone(TimeUnit::MILLI, "Europe/Paris");
174
+ TimestampType timestamp_type_us(TimeUnit::MICRO);
175
+ TimestampType timestamp_type_ns_timezone(TimeUnit::NANO, "Europe/Paris");
176
+ auto heap_timestamp_type_ns_timezone = timestamp(TimeUnit::NANO, "Europe/Paris");
177
+
178
+ DayTimeIntervalType day_time_interval_type;
179
+ MonthIntervalType month_interval_type;
180
+ MonthDayNanoIntervalType month_day_nano_interval_type;
181
+
182
+ DurationType duration_type_s(TimeUnit::SECOND);
183
+ DurationType duration_type_ns(TimeUnit::NANO);
184
+
185
+ BinaryType binary_type;
186
+ StringType string_type;
187
+ LargeBinaryType large_binary_type;
188
+ LargeStringType large_string_type;
189
+ FixedSizeBinaryType fixed_size_binary_type(10);
190
+ auto heap_fixed_size_binary_type = fixed_size_binary(10);
191
+
192
+ Decimal128Type decimal128_type(16, 5);
193
+ Decimal256Type decimal256_type(42, 12);
194
+ auto heap_decimal128_type = decimal128(16, 5);
195
+
196
+ ListType list_type(uint8());
197
+ LargeListType large_list_type(large_utf8());
198
+ auto heap_list_type = list(uint8());
199
+ auto heap_large_list_type = large_list(large_utf8());
200
+
201
+ FixedSizeListType fixed_size_list_type(float64(), 3);
202
+ auto heap_fixed_size_list_type = fixed_size_list(float64(), 3);
203
+
204
+ DictionaryType dict_type_unordered(int16(), utf8());
205
+ DictionaryType dict_type_ordered(int16(), utf8(), /*ordered=*/true);
206
+ auto heap_dict_type = dictionary(int16(), utf8());
207
+
208
+ MapType map_type_unsorted(utf8(), binary());
209
+ MapType map_type_sorted(utf8(), binary(), /*keys_sorted=*/true);
210
+ auto heap_map_type = map(utf8(), binary());
211
+
212
+ StructType struct_type_empty({});
213
+ StructType struct_type(
214
+ {field("ints", int8()), field("strs", utf8(), /*nullable=*/false)});
215
+ auto heap_struct_type =
216
+ struct_({field("ints", int8()), field("strs", utf8(), /*nullable=*/false)});
217
+
218
+ std::vector<int8_t> union_type_codes({7, 42});
219
+ FieldVector union_fields(
220
+ {field("ints", int8()), field("strs", utf8(), /*nullable=*/false)});
221
+ SparseUnionType sparse_union_type(union_fields, union_type_codes);
222
+ DenseUnionType dense_union_type(union_fields, union_type_codes);
223
+
224
+ UuidType uuid_type{};
225
+ std::shared_ptr<DataType> heap_uuid_type = std::make_shared<UuidType>();
226
+
227
+ // Schema
228
+ auto schema_empty = schema({});
229
+ auto schema_non_empty = schema({field("ints", int8()), field("strs", utf8())});
230
+ auto schema_with_metadata = schema_non_empty->WithMetadata(
231
+ key_value_metadata({"key1", "key2"}, {"value1", "value2"}));
232
+
233
+ // Fields
234
+ Field int_field("ints", int64());
235
+ Field float_field("floats", float32(), /*nullable=*/false);
236
+ auto heap_int_field = field("ints", int64());
237
+
238
+ // Scalars
239
+ NullScalar null_scalar;
240
+ auto heap_null_scalar = MakeNullScalar(null());
241
+
242
+ BooleanScalar bool_scalar_null{};
243
+ BooleanScalar bool_scalar{true};
244
+ auto heap_bool_scalar = *MakeScalar(boolean(), true);
245
+
246
+ Int8Scalar int8_scalar_null{};
247
+ UInt8Scalar uint8_scalar_null{};
248
+ Int64Scalar int64_scalar_null{};
249
+ UInt64Scalar uint64_scalar_null{};
250
+ Int8Scalar int8_scalar{-42};
251
+ UInt8Scalar uint8_scalar{234};
252
+ Int64Scalar int64_scalar{-9223372036854775807LL - 1};
253
+ UInt64Scalar uint64_scalar{18446744073709551615ULL};
254
+ HalfFloatScalar half_float_scalar{48640}; // -1.5
255
+ FloatScalar float_scalar{1.25f};
256
+ DoubleScalar double_scalar{2.5};
257
+
258
+ Time32Scalar time_scalar_s{100, TimeUnit::SECOND};
259
+ Time32Scalar time_scalar_ms{1000, TimeUnit::MILLI};
260
+ Time64Scalar time_scalar_us{10000, TimeUnit::MICRO};
261
+ Time64Scalar time_scalar_ns{100000, TimeUnit::NANO};
262
+ Time64Scalar time_scalar_null{time64(TimeUnit::NANO)};
263
+
264
+ DurationScalar duration_scalar_s{-100, TimeUnit::SECOND};
265
+ DurationScalar duration_scalar_ms{-1000, TimeUnit::MILLI};
266
+ DurationScalar duration_scalar_us{-10000, TimeUnit::MICRO};
267
+ DurationScalar duration_scalar_ns{-100000, TimeUnit::NANO};
268
+ DurationScalar duration_scalar_null{duration(TimeUnit::NANO)};
269
+
270
+ TimestampScalar timestamp_scalar_s{12345, timestamp(TimeUnit::SECOND)};
271
+ TimestampScalar timestamp_scalar_ms{-123456, timestamp(TimeUnit::MILLI)};
272
+ TimestampScalar timestamp_scalar_us{1234567, timestamp(TimeUnit::MICRO)};
273
+ TimestampScalar timestamp_scalar_ns{-12345678, timestamp(TimeUnit::NANO)};
274
+ TimestampScalar timestamp_scalar_null{timestamp(TimeUnit::NANO)};
275
+
276
+ TimestampScalar timestamp_scalar_s_tz{12345,
277
+ timestamp(TimeUnit::SECOND, "Europe/Paris")};
278
+ TimestampScalar timestamp_scalar_ms_tz{-123456,
279
+ timestamp(TimeUnit::MILLI, "Europe/Paris")};
280
+ TimestampScalar timestamp_scalar_us_tz{1234567,
281
+ timestamp(TimeUnit::MICRO, "Europe/Paris")};
282
+ TimestampScalar timestamp_scalar_ns_tz{-12345678,
283
+ timestamp(TimeUnit::NANO, "Europe/Paris")};
284
+ TimestampScalar timestamp_scalar_null_tz{timestamp(TimeUnit::NANO, "Europe/Paris")};
285
+
286
+ MonthIntervalScalar month_interval_scalar{23};
287
+ MonthIntervalScalar month_interval_scalar_null{};
288
+ DayTimeIntervalScalar day_time_interval_scalar{{23, -456}};
289
+ DayTimeIntervalScalar day_time_interval_scalar_null{};
290
+ MonthDayNanoIntervalScalar month_day_nano_interval_scalar{{1, 23, -456}};
291
+ MonthDayNanoIntervalScalar month_day_nano_interval_scalar_null{};
292
+
293
+ Date32Scalar date32_scalar{23};
294
+ Date32Scalar date32_scalar_null{};
295
+ Date64Scalar date64_scalar{45 * 86400000LL};
296
+ Date64Scalar date64_scalar_null{};
297
+
298
+ Decimal128Scalar decimal128_scalar_pos_scale_pos{Decimal128("1234567"),
299
+ decimal128(10, 4)};
300
+ Decimal128Scalar decimal128_scalar_pos_scale_neg{Decimal128("-1234567"),
301
+ decimal128(10, 4)};
302
+ Decimal128Scalar decimal128_scalar_neg_scale_pos{Decimal128("1234567"),
303
+ decimal128(10, -4)};
304
+ Decimal128Scalar decimal128_scalar_neg_scale_neg{Decimal128("-1234567"),
305
+ decimal128(10, -4)};
306
+ Decimal128Scalar decimal128_scalar_null{decimal128(10, 4)};
307
+ auto heap_decimal128_scalar = *MakeScalar(decimal128(10, 4), Decimal128("1234567"));
308
+
309
+ Decimal256Scalar decimal256_scalar_pos_scale_pos{
310
+ Decimal256("1234567890123456789012345678901234567890123456"), decimal256(50, 4)};
311
+ Decimal256Scalar decimal256_scalar_pos_scale_neg{
312
+ Decimal256("-1234567890123456789012345678901234567890123456"), decimal256(50, 4)};
313
+ Decimal256Scalar decimal256_scalar_neg_scale_pos{
314
+ Decimal256("1234567890123456789012345678901234567890123456"), decimal256(50, -4)};
315
+ Decimal256Scalar decimal256_scalar_neg_scale_neg{
316
+ Decimal256("-1234567890123456789012345678901234567890123456"), decimal256(50, -4)};
317
+ Decimal256Scalar decimal256_scalar_null{decimal256(50, 4)};
318
+ auto heap_decimal256_scalar = *MakeScalar(
319
+ decimal256(50, 4), Decimal256("1234567890123456789012345678901234567890123456"));
320
+
321
+ BinaryScalar binary_scalar_null{};
322
+ BinaryScalar binary_scalar_unallocated{std::shared_ptr<Buffer>{nullptr}};
323
+ BinaryScalar binary_scalar_empty{Buffer::FromString("")};
324
+ BinaryScalar binary_scalar_abc{Buffer::FromString("abc")};
325
+ BinaryScalar binary_scalar_bytes{
326
+ Buffer::FromString(std::string() + '\x00' + "\x1f\xff")};
327
+
328
+ StringScalar string_scalar_null{};
329
+ StringScalar string_scalar_unallocated{std::shared_ptr<Buffer>{nullptr}};
330
+ StringScalar string_scalar_empty{Buffer::FromString("")};
331
+ StringScalar string_scalar_hehe{Buffer::FromString("héhé")};
332
+ StringScalar string_scalar_invalid_chars{
333
+ Buffer::FromString(std::string("abc") + '\x00' + "def\xffghi")};
334
+
335
+ LargeBinaryScalar large_binary_scalar_abc{Buffer::FromString("abc")};
336
+ LargeStringScalar large_string_scalar_hehe{Buffer::FromString("héhé")};
337
+
338
+ FixedSizeBinaryScalar fixed_size_binary_scalar{Buffer::FromString("abc"),
339
+ fixed_size_binary(3)};
340
+ FixedSizeBinaryScalar fixed_size_binary_scalar_null{
341
+ Buffer::FromString(" "), fixed_size_binary(3), /*is_valid=*/false};
342
+
343
+ std::shared_ptr<Array> dict_array;
344
+ dict_array = *ArrayFromJSON(utf8(), R"(["foo", "bar", "quux"])");
345
+ DictionaryScalar dict_scalar{{std::make_shared<Int8Scalar>(42), dict_array},
346
+ dictionary(int8(), utf8())};
347
+ DictionaryScalar dict_scalar_null{dictionary(int8(), utf8())};
348
+
349
+ std::shared_ptr<Array> list_value_array = *ArrayFromJSON(int32(), R"([4, 5, 6])");
350
+ std::shared_ptr<Array> list_zero_length = *ArrayFromJSON(int32(), R"([])");
351
+ ListScalar list_scalar{list_value_array};
352
+ ListScalar list_scalar_null{list_zero_length, list(int32()), /*is_valid=*/false};
353
+ LargeListScalar large_list_scalar{list_value_array};
354
+ LargeListScalar large_list_scalar_null{list_zero_length, large_list(int32()),
355
+ /*is_valid=*/false};
356
+ FixedSizeListScalar fixed_size_list_scalar{list_value_array};
357
+ FixedSizeListScalar fixed_size_list_scalar_null{
358
+ list_value_array, fixed_size_list(int32(), 3), /*is_valid=*/false};
359
+
360
+ auto struct_scalar_type = struct_({field("ints", int32()), field("strs", utf8())});
361
+ StructScalar struct_scalar{
362
+ ScalarVector{MakeScalar(int32_t(42)), MakeScalar("some text")}, struct_scalar_type};
363
+ StructScalar struct_scalar_null{struct_scalar.value, struct_scalar_type,
364
+ /*is_valid=*/false};
365
+
366
+ auto sparse_union_scalar_type =
367
+ sparse_union(FieldVector{field("ints", int32()), field("strs", utf8())}, {7, 42});
368
+ auto dense_union_scalar_type =
369
+ dense_union(FieldVector{field("ints", int32()), field("strs", utf8())}, {7, 42});
370
+ std::vector<std::shared_ptr<Scalar>> union_values = {MakeScalar(int32_t(43)),
371
+ MakeNullScalar(utf8())};
372
+ SparseUnionScalar sparse_union_scalar{union_values, 7, sparse_union_scalar_type};
373
+ DenseUnionScalar dense_union_scalar{union_values[0], 7, dense_union_scalar_type};
374
+
375
+ union_values[0] = MakeNullScalar(int32());
376
+ SparseUnionScalar sparse_union_scalar_null{union_values, 7, sparse_union_scalar_type};
377
+ DenseUnionScalar dense_union_scalar_null{union_values[0], 7, dense_union_scalar_type};
378
+
379
+ auto extension_scalar_type = std::make_shared<UuidType>();
380
+ ExtensionScalar extension_scalar{
381
+ std::make_shared<FixedSizeBinaryScalar>(Buffer::FromString("0123456789abcdef"),
382
+ extension_scalar_type->storage_type()),
383
+ extension_scalar_type};
384
+ ExtensionScalar extension_scalar_null{extension_scalar.value, extension_scalar_type,
385
+ /*is_valid=*/false};
386
+
387
+ std::shared_ptr<Scalar> heap_map_scalar;
388
+ ARROW_CHECK_OK(
389
+ ScalarFromJSON(map(utf8(), int32()), R"([["a", 5], ["b", 6]])", &heap_map_scalar));
390
+ auto heap_map_scalar_null = MakeNullScalar(heap_map_scalar->type);
391
+
392
+ // Array and ArrayData
393
+ auto heap_null_array = SliceArrayFromJSON(null(), "[null, null]");
394
+
395
+ auto heap_int32_array = SliceArrayFromJSON(int32(), "[-5, 6, null, 42]");
396
+ ArrayData int32_array_data{*heap_int32_array->data()};
397
+ Int32Array int32_array{heap_int32_array->data()->Copy()};
398
+
399
+ auto heap_int32_array_no_nulls = SliceArrayFromJSON(int32(), "[-5, 6, 3, 42]");
400
+
401
+ const char* json_int32_array = "[-1, 2, -3, 4, null, -5, 6, -7, 8, null, -9, -10]";
402
+ auto heap_int32_array_sliced_1_9 = SliceArrayFromJSON(int32(), json_int32_array, 1, 9);
403
+ auto heap_int32_array_sliced_2_6 = SliceArrayFromJSON(int32(), json_int32_array, 2, 6);
404
+ auto heap_int32_array_sliced_8_4 = SliceArrayFromJSON(int32(), json_int32_array, 8, 4);
405
+ auto heap_int32_array_sliced_empty =
406
+ SliceArrayFromJSON(int32(), json_int32_array, 6, 0);
407
+
408
+ const char* json_bool_array =
409
+ "[false, false, true, true, null, null, false, false, true, true, "
410
+ "null, null, false, false, true, true, null, null]";
411
+ auto heap_bool_array = SliceArrayFromJSON(boolean(), json_bool_array);
412
+ auto heap_bool_array_sliced_1_9 = SliceArrayFromJSON(boolean(), json_bool_array, 1, 9);
413
+ auto heap_bool_array_sliced_2_6 = SliceArrayFromJSON(boolean(), json_bool_array, 2, 6);
414
+ auto heap_bool_array_sliced_empty =
415
+ SliceArrayFromJSON(boolean(), json_bool_array, 6, 0);
416
+
417
+ auto heap_list_array = SliceArrayFromJSON(list(int64()), "[[1, 2], null, []]");
418
+ ListArray list_array{heap_list_array->data()};
419
+
420
+ const char* json_double_array = "[-1.5, null]";
421
+ auto heap_double_array = SliceArrayFromJSON(float64(), json_double_array);
422
+
423
+ const char* json_float16_array = "[0, 48640]";
424
+ auto heap_float16_array =
425
+ *SliceArrayFromJSON(uint16(), json_float16_array)->View(float16());
426
+
427
+ auto heap_date32_array =
428
+ SliceArrayFromJSON(date32(), "[0, null, 18336, -9004, -719162, -719163]");
429
+ auto heap_date64_array = SliceArrayFromJSON(
430
+ date64(), "[1584230400000, -777945600000, -62135596800000, -62135683200000, 123]");
431
+
432
+ const char* json_time_array = "[null, -123, 456]";
433
+ auto heap_time32_array_s =
434
+ SliceArrayFromJSON(time32(TimeUnit::SECOND), json_time_array);
435
+ auto heap_time32_array_ms =
436
+ SliceArrayFromJSON(time32(TimeUnit::MILLI), json_time_array);
437
+ auto heap_time64_array_us =
438
+ SliceArrayFromJSON(time64(TimeUnit::MICRO), json_time_array);
439
+ auto heap_time64_array_ns = SliceArrayFromJSON(time64(TimeUnit::NANO), json_time_array);
440
+
441
+ auto heap_month_interval_array =
442
+ SliceArrayFromJSON(month_interval(), "[123, -456, null]");
443
+ auto heap_day_time_interval_array =
444
+ SliceArrayFromJSON(day_time_interval(), "[[1, -600], null]");
445
+ auto heap_month_day_nano_interval_array =
446
+ SliceArrayFromJSON(month_day_nano_interval(), "[[1, -600, 5000], null]");
447
+
448
+ const char* json_duration_array = "[null, -1234567890123456789]";
449
+ auto heap_duration_array_s =
450
+ SliceArrayFromJSON(duration(TimeUnit::SECOND), json_duration_array);
451
+ auto heap_duration_array_ns =
452
+ SliceArrayFromJSON(duration(TimeUnit::NANO), json_duration_array);
453
+
454
+ auto heap_timestamp_array_s = SliceArrayFromJSON(
455
+ timestamp(TimeUnit::SECOND),
456
+ R"([null, "1970-01-01 00:00:00", "1900-02-28 12:34:56", "3989-07-14 00:00:00"])");
457
+ auto heap_timestamp_array_ms = SliceArrayFromJSON(
458
+ timestamp(TimeUnit::MILLI),
459
+ R"([null, "1900-02-28 12:34:56.123", "3989-07-14 00:00:00.789"])");
460
+ auto heap_timestamp_array_us = SliceArrayFromJSON(
461
+ timestamp(TimeUnit::MICRO),
462
+ R"([null, "1900-02-28 12:34:56.654321", "3989-07-14 00:00:00.456789"])");
463
+ auto heap_timestamp_array_ns = SliceArrayFromJSON(
464
+ timestamp(TimeUnit::NANO), R"([null, "1900-02-28 12:34:56.987654321"])");
465
+
466
+ auto heap_decimal128_array = SliceArrayFromJSON(
467
+ decimal128(30, 6),
468
+ R"([null, "-1234567890123456789.012345", "1234567890123456789.012345"])");
469
+ auto heap_decimal256_array = SliceArrayFromJSON(
470
+ decimal256(50, 6), R"([null, "-123456789012345678901234567890123456789.012345"])");
471
+ auto heap_decimal128_array_sliced = heap_decimal128_array->Slice(1, 1);
472
+
473
+ auto heap_fixed_size_binary_array =
474
+ SliceArrayFromJSON(fixed_size_binary(3), "[null, \"abc\", \"\\u0000\\u001f\xff\"]");
475
+ auto heap_fixed_size_binary_array_zero_width =
476
+ SliceArrayFromJSON(fixed_size_binary(0), R"([null, ""])");
477
+ auto heap_fixed_size_binary_array_sliced = heap_fixed_size_binary_array->Slice(1, 1);
478
+
479
+ const char* json_binary_array = "[null, \"abcd\", \"\\u0000\\u001f\xff\"]";
480
+ auto heap_binary_array = SliceArrayFromJSON(binary(), json_binary_array);
481
+ auto heap_large_binary_array = SliceArrayFromJSON(large_binary(), json_binary_array);
482
+ const char* json_string_array = "[null, \"héhé\", \"invalid \xff char\"]";
483
+ auto heap_string_array = SliceArrayFromJSON(utf8(), json_string_array);
484
+ auto heap_large_string_array = SliceArrayFromJSON(large_utf8(), json_string_array);
485
+ auto heap_binary_array_sliced = heap_binary_array->Slice(1, 1);
486
+
487
+ // ChunkedArray
488
+ ArrayVector array_chunks(2);
489
+ array_chunks[0] = *ArrayFromJSON(int32(), "[1, 2]");
490
+ array_chunks[1] = *ArrayFromJSON(int32(), "[3, null, 4]");
491
+ ChunkedArray chunked_array{array_chunks};
492
+
493
+ // RecordBatch
494
+ auto batch_schema = schema({field("ints", int32()), field("strs", utf8())});
495
+ ArrayVector batch_columns{2};
496
+ batch_columns[0] = *ArrayFromJSON(int32(), "[1, 2, 3]");
497
+ batch_columns[1] = *ArrayFromJSON(utf8(), R"(["abc", null, "def"])");
498
+ auto batch = RecordBatch::Make(batch_schema, /*num_rows=*/3, batch_columns);
499
+ auto batch_with_metadata = batch->ReplaceSchemaMetadata(
500
+ key_value_metadata({"key1", "key2", "key3"}, {"value1", "value2", "value3"}));
501
+
502
+ // Table
503
+ ChunkedArrayVector table_columns{2};
504
+ ARROW_CHECK_OK(
505
+ ChunkedArrayFromJSON(int32(), {"[1, 2, 3]", "[4, 5]"}, &table_columns[0]));
506
+ ARROW_CHECK_OK(ChunkedArrayFromJSON(
507
+ utf8(), {R"(["abc", null])", R"(["def"])", R"(["ghi", "jkl"])"},
508
+ &table_columns[1]));
509
+ auto table = Table::Make(batch_schema, table_columns);
510
+
511
+ // Datum
512
+ Datum empty_datum{};
513
+ Datum scalar_datum{MakeNullScalar(boolean())};
514
+ Datum array_datum{heap_int32_array};
515
+ Datum chunked_array_datum{chunked_array};
516
+ Datum batch_datum{batch};
517
+ Datum table_datum{table};
518
+
519
+ #ifdef __clang__
520
+ _Pragma("clang diagnostic pop");
521
+ #elif defined(__GNUC__)
522
+ _Pragma("GCC diagnostic pop");
523
+ #endif
524
+
525
+ // Hook into debugger
526
+ ::arrow::internal::DebugTrap();
527
+ }
528
+
529
+ } // namespace gdb
530
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.h ADDED
@@ -0,0 +1,29 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace gdb {
24
+
25
+ ARROW_PYTHON_EXPORT
26
+ void TestSession();
27
+
28
+ } // namespace gdb
29
+ } // namespace arrow
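gdb.h exposes a single entry point, `arrow::gdb::TestSession()`, which builds the local variables shown in gdb.cc and then traps into the debugger so that pyarrow/tests/test_gdb.py can inspect them. A minimal standalone driver (illustrative only; the `main` wrapper is an assumption, since in practice the session is driven from the Python test suite under GDB) could be:

// Illustrative only: run the GDB pretty-printer test session outside the test suite.
#include "arrow/python/gdb.h"

int main() {
  // Sets up one local variable per Arrow type to pretty-print, then calls
  // arrow::internal::DebugTrap() so an attached debugger can inspect the frame.
  arrow::gdb::TestSession();
  return 0;
}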
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/helpers.cc ADDED
@@ -0,0 +1,472 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // helpers.h includes a NumPy header, so we include this first
19
+ #include "arrow/python/numpy_interop.h"
20
+
21
+ #include "arrow/python/helpers.h"
22
+
23
+ #include <cmath>
24
+ #include <limits>
25
+ #include <sstream>
26
+ #include <type_traits>
27
+
28
+ #include "arrow/python/common.h"
29
+ #include "arrow/python/decimal.h"
30
+ #include "arrow/type_fwd.h"
31
+ #include "arrow/util/checked_cast.h"
32
+ #include "arrow/util/logging.h"
33
+
34
+ namespace arrow {
35
+
36
+ using internal::checked_cast;
37
+
38
+ namespace py {
39
+
40
+ #define GET_PRIMITIVE_TYPE(NAME, FACTORY) \
41
+ case Type::NAME: \
42
+ return FACTORY()
43
+
44
+ std::shared_ptr<DataType> GetPrimitiveType(Type::type type) {
45
+ switch (type) {
46
+ case Type::NA:
47
+ return null();
48
+ GET_PRIMITIVE_TYPE(UINT8, uint8);
49
+ GET_PRIMITIVE_TYPE(INT8, int8);
50
+ GET_PRIMITIVE_TYPE(UINT16, uint16);
51
+ GET_PRIMITIVE_TYPE(INT16, int16);
52
+ GET_PRIMITIVE_TYPE(UINT32, uint32);
53
+ GET_PRIMITIVE_TYPE(INT32, int32);
54
+ GET_PRIMITIVE_TYPE(UINT64, uint64);
55
+ GET_PRIMITIVE_TYPE(INT64, int64);
56
+ GET_PRIMITIVE_TYPE(DATE32, date32);
57
+ GET_PRIMITIVE_TYPE(DATE64, date64);
58
+ GET_PRIMITIVE_TYPE(BOOL, boolean);
59
+ GET_PRIMITIVE_TYPE(HALF_FLOAT, float16);
60
+ GET_PRIMITIVE_TYPE(FLOAT, float32);
61
+ GET_PRIMITIVE_TYPE(DOUBLE, float64);
62
+ GET_PRIMITIVE_TYPE(BINARY, binary);
63
+ GET_PRIMITIVE_TYPE(STRING, utf8);
64
+ GET_PRIMITIVE_TYPE(LARGE_BINARY, large_binary);
65
+ GET_PRIMITIVE_TYPE(LARGE_STRING, large_utf8);
66
+ GET_PRIMITIVE_TYPE(BINARY_VIEW, binary_view);
67
+ GET_PRIMITIVE_TYPE(STRING_VIEW, utf8_view);
68
+ GET_PRIMITIVE_TYPE(INTERVAL_MONTH_DAY_NANO, month_day_nano_interval);
69
+ default:
70
+ return nullptr;
71
+ }
72
+ }
73
+
74
+ PyObject* PyHalf_FromHalf(npy_half value) {
75
+ PyObject* result = PyArrayScalar_New(Half);
76
+ if (result != NULL) {
77
+ PyArrayScalar_ASSIGN(result, Half, value);
78
+ }
79
+ return result;
80
+ }
81
+
82
+ Status PyFloat_AsHalf(PyObject* obj, npy_half* out) {
83
+ if (PyArray_IsScalar(obj, Half)) {
84
+ *out = PyArrayScalar_VAL(obj, Half);
85
+ return Status::OK();
86
+ } else {
87
+ // XXX: cannot use npy_double_to_half() without linking with Numpy
88
+ return Status::TypeError("Expected np.float16 instance");
89
+ }
90
+ }
91
+
92
+ namespace internal {
93
+
94
+ std::string PyBytes_AsStdString(PyObject* obj) {
95
+ DCHECK(PyBytes_Check(obj));
96
+ return std::string(PyBytes_AS_STRING(obj), PyBytes_GET_SIZE(obj));
97
+ }
98
+
99
+ Status PyUnicode_AsStdString(PyObject* obj, std::string* out) {
100
+ DCHECK(PyUnicode_Check(obj));
101
+ Py_ssize_t size;
102
+ // The utf-8 representation is cached on the unicode object
103
+ const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
104
+ RETURN_IF_PYERROR();
105
+ *out = std::string(data, size);
106
+ return Status::OK();
107
+ }
108
+
109
+ std::string PyObject_StdStringRepr(PyObject* obj) {
110
+ OwnedRef unicode_ref(PyObject_Repr(obj));
111
+ OwnedRef bytes_ref;
112
+
113
+ if (unicode_ref) {
114
+ bytes_ref.reset(
115
+ PyUnicode_AsEncodedString(unicode_ref.obj(), "utf8", "backslashreplace"));
116
+ }
117
+ if (!bytes_ref) {
118
+ PyErr_Clear();
119
+ std::stringstream ss;
120
+ ss << "<object of type '" << Py_TYPE(obj)->tp_name << "' repr() failed>";
121
+ return ss.str();
122
+ }
123
+ return PyBytes_AsStdString(bytes_ref.obj());
124
+ }
125
+
126
+ Status PyObject_StdStringStr(PyObject* obj, std::string* out) {
127
+ OwnedRef string_ref(PyObject_Str(obj));
128
+ RETURN_IF_PYERROR();
129
+ return PyUnicode_AsStdString(string_ref.obj(), out);
130
+ }
131
+
132
+ Result<bool> IsModuleImported(const std::string& module_name) {
133
+ // PyImport_GetModuleDict returns with a borrowed reference
134
+ OwnedRef key(PyUnicode_FromString(module_name.c_str()));
135
+ auto is_imported = PyDict_Contains(PyImport_GetModuleDict(), key.obj());
136
+ RETURN_IF_PYERROR();
137
+ return is_imported;
138
+ }
139
+
140
+ Status ImportModule(const std::string& module_name, OwnedRef* ref) {
141
+ PyObject* module = PyImport_ImportModule(module_name.c_str());
142
+ RETURN_IF_PYERROR();
143
+ ref->reset(module);
144
+ return Status::OK();
145
+ }
146
+
147
+ Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref) {
148
+ PyObject* attr = PyObject_GetAttrString(module, name.c_str());
149
+ RETURN_IF_PYERROR();
150
+ ref->reset(attr);
151
+ return Status::OK();
152
+ }
153
+
154
+ namespace {
155
+
156
+ Status IntegerOverflowStatus(PyObject* obj, const std::string& overflow_message) {
157
+ if (overflow_message.empty()) {
158
+ std::string obj_as_stdstring;
159
+ RETURN_NOT_OK(PyObject_StdStringStr(obj, &obj_as_stdstring));
160
+ return Status::Invalid("Value ", obj_as_stdstring,
161
+ " too large to fit in C integer type");
162
+ } else {
163
+ return Status::Invalid(overflow_message);
164
+ }
165
+ }
166
+
167
+ Result<OwnedRef> PyObjectToPyInt(PyObject* obj) {
168
+ // Try to call __index__ or __int__ on `obj`
169
+ // (starting from Python 3.10, the latter isn't done anymore by PyLong_AsLong*).
170
+ OwnedRef ref(PyNumber_Index(obj));
171
+ if (ref) {
172
+ return std::move(ref);
173
+ }
174
+ PyErr_Clear();
175
+ const auto nb = Py_TYPE(obj)->tp_as_number;
176
+ if (nb && nb->nb_int) {
177
+ ref.reset(nb->nb_int(obj));
178
+ if (!ref) {
179
+ RETURN_IF_PYERROR();
180
+ }
181
+ DCHECK(ref);
182
+ return std::move(ref);
183
+ }
184
+ return Status::TypeError(
185
+ "object of type ",
186
+ PyObject_StdStringRepr(reinterpret_cast<PyObject*>(Py_TYPE(obj))),
187
+ " cannot be converted to int");
188
+ }
189
+
190
+ // Extract C signed int from Python object
191
+ template <typename Int, enable_if_t<std::is_signed<Int>::value, Int> = 0>
192
+ Status CIntFromPythonImpl(PyObject* obj, Int* out, const std::string& overflow_message) {
193
+ static_assert(sizeof(Int) <= sizeof(long long), // NOLINT
194
+ "integer type larger than long long");
195
+
196
+ OwnedRef ref;
197
+ if (!PyLong_Check(obj)) {
198
+ ARROW_ASSIGN_OR_RAISE(ref, PyObjectToPyInt(obj));
199
+ obj = ref.obj();
200
+ }
201
+
202
+ if (sizeof(Int) > sizeof(long)) { // NOLINT
203
+ const auto value = PyLong_AsLongLong(obj);
204
+ if (ARROW_PREDICT_FALSE(value == -1)) {
205
+ RETURN_IF_PYERROR();
206
+ }
207
+ if (ARROW_PREDICT_FALSE(value < std::numeric_limits<Int>::min() ||
208
+ value > std::numeric_limits<Int>::max())) {
209
+ return IntegerOverflowStatus(obj, overflow_message);
210
+ }
211
+ *out = static_cast<Int>(value);
212
+ } else {
213
+ const auto value = PyLong_AsLong(obj);
214
+ if (ARROW_PREDICT_FALSE(value == -1)) {
215
+ RETURN_IF_PYERROR();
216
+ }
217
+ if (ARROW_PREDICT_FALSE(value < std::numeric_limits<Int>::min() ||
218
+ value > std::numeric_limits<Int>::max())) {
219
+ return IntegerOverflowStatus(obj, overflow_message);
220
+ }
221
+ *out = static_cast<Int>(value);
222
+ }
223
+ return Status::OK();
224
+ }
225
+
226
+ // Extract C unsigned int from Python object
227
+ template <typename Int, enable_if_t<std::is_unsigned<Int>::value, Int> = 0>
228
+ Status CIntFromPythonImpl(PyObject* obj, Int* out, const std::string& overflow_message) {
229
+ static_assert(sizeof(Int) <= sizeof(unsigned long long), // NOLINT
230
+ "integer type larger than unsigned long long");
231
+
232
+ OwnedRef ref;
233
+ if (!PyLong_Check(obj)) {
234
+ ARROW_ASSIGN_OR_RAISE(ref, PyObjectToPyInt(obj));
235
+ obj = ref.obj();
236
+ }
237
+
238
+ if (sizeof(Int) > sizeof(unsigned long)) { // NOLINT
239
+ const auto value = PyLong_AsUnsignedLongLong(obj);
240
+ if (ARROW_PREDICT_FALSE(value == static_cast<decltype(value)>(-1))) {
241
+ RETURN_IF_PYERROR();
242
+ }
243
+ if (ARROW_PREDICT_FALSE(value > std::numeric_limits<Int>::max())) {
244
+ return IntegerOverflowStatus(obj, overflow_message);
245
+ }
246
+ *out = static_cast<Int>(value);
247
+ } else {
248
+ const auto value = PyLong_AsUnsignedLong(obj);
249
+ if (ARROW_PREDICT_FALSE(value == static_cast<decltype(value)>(-1))) {
250
+ RETURN_IF_PYERROR();
251
+ }
252
+ if (ARROW_PREDICT_FALSE(value > std::numeric_limits<Int>::max())) {
253
+ return IntegerOverflowStatus(obj, overflow_message);
254
+ }
255
+ *out = static_cast<Int>(value);
256
+ }
257
+ return Status::OK();
258
+ }
259
+
260
+ } // namespace
261
+
262
+ template <typename Int>
263
+ Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message) {
264
+ if (PyBool_Check(obj)) {
265
+ return Status::TypeError("Expected integer, got bool");
266
+ }
267
+ return CIntFromPythonImpl(obj, out, overflow_message);
268
+ }
269
+
270
+ template Status CIntFromPython(PyObject*, int8_t*, const std::string&);
271
+ template Status CIntFromPython(PyObject*, int16_t*, const std::string&);
272
+ template Status CIntFromPython(PyObject*, int32_t*, const std::string&);
273
+ template Status CIntFromPython(PyObject*, int64_t*, const std::string&);
274
+ template Status CIntFromPython(PyObject*, uint8_t*, const std::string&);
275
+ template Status CIntFromPython(PyObject*, uint16_t*, const std::string&);
276
+ template Status CIntFromPython(PyObject*, uint32_t*, const std::string&);
277
+ template Status CIntFromPython(PyObject*, uint64_t*, const std::string&);
278
+
279
+ inline bool MayHaveNaN(PyObject* obj) {
280
+ // Some core types can be very quickly type-checked and do not allow NaN values
281
+ const int64_t non_nan_tpflags = Py_TPFLAGS_LONG_SUBCLASS | Py_TPFLAGS_LIST_SUBCLASS |
282
+ Py_TPFLAGS_TUPLE_SUBCLASS | Py_TPFLAGS_BYTES_SUBCLASS |
283
+ Py_TPFLAGS_UNICODE_SUBCLASS | Py_TPFLAGS_DICT_SUBCLASS |
284
+ Py_TPFLAGS_BASE_EXC_SUBCLASS | Py_TPFLAGS_TYPE_SUBCLASS;
285
+ return !PyType_HasFeature(Py_TYPE(obj), non_nan_tpflags);
286
+ }
287
+
288
+ bool PyFloat_IsNaN(PyObject* obj) {
289
+ return PyFloat_Check(obj) && std::isnan(PyFloat_AsDouble(obj));
290
+ }
291
+
292
+ namespace {
293
+
294
+ static bool pandas_static_initialized = false;
295
+
296
+ // Once initialized, these variables hold borrowed references to Pandas static data.
297
+ // We should not use OwnedRef here because Python destructors would be
298
+ // called on a finalized interpreter.
299
+ static PyObject* pandas_NA = nullptr;
300
+ static PyObject* pandas_NaT = nullptr;
301
+ static PyObject* pandas_Timedelta = nullptr;
302
+ static PyObject* pandas_Timestamp = nullptr;
303
+ static PyTypeObject* pandas_NaTType = nullptr;
304
+ static PyObject* pandas_DateOffset = nullptr;
305
+
306
+ } // namespace
307
+
308
+ void InitPandasStaticData() {
309
+ // NOTE: This is called with the GIL held. We needn't (and shouldn't,
310
+ // to avoid deadlocks) use an additional C++ lock (ARROW-10519).
311
+ if (pandas_static_initialized) {
312
+ return;
313
+ }
314
+
315
+ OwnedRef pandas;
316
+
317
+ // Import pandas
318
+ Status s = ImportModule("pandas", &pandas);
319
+ if (!s.ok()) {
320
+ return;
321
+ }
322
+
323
+ // Since ImportModule can release the GIL, another thread could have
324
+ // already initialized the static data.
325
+ if (pandas_static_initialized) {
326
+ return;
327
+ }
328
+ OwnedRef ref;
329
+
330
+ // set NaT sentinel and its type
331
+ if (ImportFromModule(pandas.obj(), "NaT", &ref).ok()) {
332
+ pandas_NaT = ref.obj();
333
+ // PyObject_Type returns a new reference but we trust that pandas.NaT will
334
+ // outlive our use of this PyObject*
335
+ pandas_NaTType = Py_TYPE(ref.obj());
336
+ }
337
+
338
+ // retain a reference to Timedelta
339
+ if (ImportFromModule(pandas.obj(), "Timedelta", &ref).ok()) {
340
+ pandas_Timedelta = ref.obj();
341
+ }
342
+
343
+ // retain a reference to Timestamp
344
+ if (ImportFromModule(pandas.obj(), "Timestamp", &ref).ok()) {
345
+ pandas_Timestamp = ref.obj();
346
+ }
347
+
348
+ // if pandas.NA exists, retain a reference to it
349
+ if (ImportFromModule(pandas.obj(), "NA", &ref).ok()) {
350
+ pandas_NA = ref.obj();
351
+ }
352
+
353
+ // Import DateOffset type
354
+ if (ImportFromModule(pandas.obj(), "DateOffset", &ref).ok()) {
355
+ pandas_DateOffset = ref.obj();
356
+ }
357
+
358
+ pandas_static_initialized = true;
359
+ }
360
+
361
+ bool PandasObjectIsNull(PyObject* obj) {
362
+ if (!MayHaveNaN(obj)) {
363
+ return false;
364
+ }
365
+ if (obj == Py_None) {
366
+ return true;
367
+ }
368
+ if (PyFloat_IsNaN(obj) || (pandas_NA && obj == pandas_NA) ||
369
+ (pandas_NaTType && PyObject_TypeCheck(obj, pandas_NaTType)) ||
370
+ (internal::PyDecimal_Check(obj) && internal::PyDecimal_ISNAN(obj))) {
371
+ return true;
372
+ }
373
+ return false;
374
+ }
375
+
376
+ bool IsPandasTimedelta(PyObject* obj) {
377
+ return pandas_Timedelta && PyObject_IsInstance(obj, pandas_Timedelta);
378
+ }
379
+
380
+ bool IsPandasTimestamp(PyObject* obj) {
381
+ return pandas_Timestamp && PyObject_IsInstance(obj, pandas_Timestamp);
382
+ }
383
+
384
+ PyObject* BorrowPandasDataOffsetType() { return pandas_DateOffset; }
385
+
386
+ Status InvalidValue(PyObject* obj, const std::string& why) {
387
+ auto obj_as_str = PyObject_StdStringRepr(obj);
388
+ return Status::Invalid("Could not convert ", std::move(obj_as_str), " with type ",
389
+ Py_TYPE(obj)->tp_name, ": ", why);
390
+ }
391
+
392
+ Status InvalidType(PyObject* obj, const std::string& why) {
393
+ auto obj_as_str = PyObject_StdStringRepr(obj);
394
+ return Status::TypeError("Could not convert ", std::move(obj_as_str), " with type ",
395
+ Py_TYPE(obj)->tp_name, ": ", why);
396
+ }
397
+
398
+ Status UnboxIntegerAsInt64(PyObject* obj, int64_t* out) {
399
+ if (PyLong_Check(obj)) {
400
+ int overflow = 0;
401
+ *out = PyLong_AsLongLongAndOverflow(obj, &overflow);
402
+ if (overflow) {
403
+ return Status::Invalid("PyLong is too large to fit int64");
404
+ }
405
+ } else if (PyArray_IsScalar(obj, Byte)) {
406
+ *out = reinterpret_cast<PyByteScalarObject*>(obj)->obval;
407
+ } else if (PyArray_IsScalar(obj, UByte)) {
408
+ *out = reinterpret_cast<PyUByteScalarObject*>(obj)->obval;
409
+ } else if (PyArray_IsScalar(obj, Short)) {
410
+ *out = reinterpret_cast<PyShortScalarObject*>(obj)->obval;
411
+ } else if (PyArray_IsScalar(obj, UShort)) {
412
+ *out = reinterpret_cast<PyUShortScalarObject*>(obj)->obval;
413
+ } else if (PyArray_IsScalar(obj, Int)) {
414
+ *out = reinterpret_cast<PyIntScalarObject*>(obj)->obval;
415
+ } else if (PyArray_IsScalar(obj, UInt)) {
416
+ *out = reinterpret_cast<PyUIntScalarObject*>(obj)->obval;
417
+ } else if (PyArray_IsScalar(obj, Long)) {
418
+ *out = reinterpret_cast<PyLongScalarObject*>(obj)->obval;
419
+ } else if (PyArray_IsScalar(obj, ULong)) {
420
+ *out = reinterpret_cast<PyULongScalarObject*>(obj)->obval;
421
+ } else if (PyArray_IsScalar(obj, LongLong)) {
422
+ *out = reinterpret_cast<PyLongLongScalarObject*>(obj)->obval;
423
+ } else if (PyArray_IsScalar(obj, Int64)) {
424
+ *out = reinterpret_cast<PyInt64ScalarObject*>(obj)->obval;
425
+ } else if (PyArray_IsScalar(obj, ULongLong)) {
426
+ *out = reinterpret_cast<PyULongLongScalarObject*>(obj)->obval;
427
+ } else if (PyArray_IsScalar(obj, UInt64)) {
428
+ *out = reinterpret_cast<PyUInt64ScalarObject*>(obj)->obval;
429
+ } else {
430
+ return Status::Invalid("Integer scalar type not recognized");
431
+ }
432
+ return Status::OK();
433
+ }
434
+
435
+ Status IntegerScalarToDoubleSafe(PyObject* obj, double* out) {
436
+ int64_t value = 0;
437
+ RETURN_NOT_OK(UnboxIntegerAsInt64(obj, &value));
438
+
439
+ constexpr int64_t kDoubleMax = 1LL << 53;
440
+ constexpr int64_t kDoubleMin = -(1LL << 53);
441
+
442
+ if (value < kDoubleMin || value > kDoubleMax) {
443
+ return Status::Invalid("Integer value ", value, " is outside of the range exactly",
444
+ " representable by an IEEE 754 double precision value");
445
+ }
446
+ *out = static_cast<double>(value);
447
+ return Status::OK();
448
+ }
449
+
450
+ Status IntegerScalarToFloat32Safe(PyObject* obj, float* out) {
451
+ int64_t value = 0;
452
+ RETURN_NOT_OK(UnboxIntegerAsInt64(obj, &value));
453
+
454
+ constexpr int64_t kFloatMax = 1LL << 24;
455
+ constexpr int64_t kFloatMin = -(1LL << 24);
456
+
457
+ if (value < kFloatMin || value > kFloatMax) {
458
+ return Status::Invalid("Integer value ", value, " is outside of the range exactly",
459
+ " representable by an IEEE 754 single precision value");
460
+ }
461
+ *out = static_cast<float>(value);
462
+ return Status::OK();
463
+ }
464
+
465
+ void DebugPrint(PyObject* obj) {
466
+ std::string repr = PyObject_StdStringRepr(obj);
467
+ PySys_WriteStderr("%s\n", repr.c_str());
468
+ }
469
+
470
+ } // namespace internal
471
+ } // namespace py
472
+ } // namespace arrow
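
A minimal usage sketch for the CIntFromPython instantiations above, assuming the GIL is held and that the template is exposed through arrow/python/helpers.h; the function name and the overflow message string are illustrative only:

#include <cstdint>

#include "arrow/python/helpers.h"
#include "arrow/status.h"

// Narrow a Python integer object into int8_t, reporting overflow through Status.
arrow::Status NarrowToInt8(PyObject* py_int, int8_t* out) {
  return arrow::py::internal::CIntFromPython(py_int, out,
                                             "value does not fit in int8");
}
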
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.cc ADDED
@@ -0,0 +1,745 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/inference.h"
19
+ #include "arrow/python/numpy_interop.h"
20
+
21
+ #include <datetime.h>
22
+
23
+ #include <algorithm>
24
+ #include <limits>
25
+ #include <map>
26
+ #include <string>
27
+ #include <utility>
28
+ #include <vector>
29
+
30
+ #include "arrow/scalar.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/util/decimal.h"
33
+ #include "arrow/util/logging.h"
34
+
35
+ #include "arrow/python/datetime.h"
36
+ #include "arrow/python/decimal.h"
37
+ #include "arrow/python/helpers.h"
38
+ #include "arrow/python/iterators.h"
39
+ #include "arrow/python/numpy_convert.h"
40
+
41
+ namespace arrow {
42
+ namespace py {
43
+ namespace {
44
+ // Assigns a tuple to interval_types_tuple containing the namedtuple for
45
+ // MonthDayNanoIntervalType and if present dateutil's relativedelta and
46
+ // pandas DateOffset.
47
+ Status ImportPresentIntervalTypes(OwnedRefNoGIL* interval_types_tuple) {
48
+ OwnedRef relative_delta_module;
49
+ // These are optional imports, so swallow errors.
50
+ OwnedRef relative_delta_type;
51
+ // Try to import pandas to get types.
52
+ internal::InitPandasStaticData();
53
+ if (internal::ImportModule("dateutil.relativedelta", &relative_delta_module).ok()) {
54
+ RETURN_NOT_OK(internal::ImportFromModule(relative_delta_module.obj(), "relativedelta",
55
+ &relative_delta_type));
56
+ }
57
+
58
+ PyObject* date_offset_type = internal::BorrowPandasDataOffsetType();
59
+ interval_types_tuple->reset(
60
+ PyTuple_New(1 + (date_offset_type != nullptr ? 1 : 0) +
61
+ (relative_delta_type.obj() != nullptr ? 1 : 0)));
62
+ RETURN_IF_PYERROR();
63
+ int index = 0;
64
+ PyTuple_SetItem(interval_types_tuple->obj(), index++,
65
+ internal::NewMonthDayNanoTupleType());
66
+ RETURN_IF_PYERROR();
67
+ if (date_offset_type != nullptr) {
68
+ Py_XINCREF(date_offset_type);
69
+ PyTuple_SetItem(interval_types_tuple->obj(), index++, date_offset_type);
70
+ RETURN_IF_PYERROR();
71
+ }
72
+ if (relative_delta_type.obj() != nullptr) {
73
+ PyTuple_SetItem(interval_types_tuple->obj(), index++, relative_delta_type.detach());
74
+ RETURN_IF_PYERROR();
75
+ }
76
+ return Status::OK();
77
+ }
78
+
79
+ } // namespace
80
+
81
+ #define _NUMPY_UNIFY_NOOP(DTYPE) \
82
+ case NPY_##DTYPE: \
83
+ return OK;
84
+
85
+ #define _NUMPY_UNIFY_PROMOTE(DTYPE) \
86
+ case NPY_##DTYPE: \
87
+ current_type_num_ = dtype; \
88
+ current_dtype_ = descr; \
89
+ return OK;
90
+
91
+ #define _NUMPY_UNIFY_PROMOTE_TO(DTYPE, NEW_TYPE) \
92
+ case NPY_##DTYPE: \
93
+ current_type_num_ = NPY_##NEW_TYPE; \
94
+ current_dtype_ = PyArray_DescrFromType(current_type_num_); \
95
+ return OK;
96
+
97
+ // Form a consensus NumPy dtype to use for Arrow conversion for a
98
+ // collection of dtype objects observed one at a time
99
+ class NumPyDtypeUnifier {
100
+ public:
101
+ enum Action { OK, INVALID };
102
+
103
+ NumPyDtypeUnifier() : current_type_num_(-1), current_dtype_(nullptr) {}
104
+
105
+ Status InvalidMix(int new_dtype) {
106
+ return Status::Invalid("Cannot mix NumPy dtypes ",
107
+ GetNumPyTypeName(current_type_num_), " and ",
108
+ GetNumPyTypeName(new_dtype));
109
+ }
110
+
111
+ int Observe_BOOL(PyArray_Descr* descr, int dtype) { return INVALID; }
112
+
113
+ int Observe_INT8(PyArray_Descr* descr, int dtype) {
114
+ switch (dtype) {
115
+ _NUMPY_UNIFY_PROMOTE(INT16);
116
+ _NUMPY_UNIFY_PROMOTE(INT32);
117
+ _NUMPY_UNIFY_PROMOTE(INT64);
118
+ _NUMPY_UNIFY_PROMOTE(FLOAT32);
119
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
120
+ default:
121
+ return INVALID;
122
+ }
123
+ }
124
+
125
+ int Observe_INT16(PyArray_Descr* descr, int dtype) {
126
+ switch (dtype) {
127
+ _NUMPY_UNIFY_NOOP(INT8);
128
+ _NUMPY_UNIFY_PROMOTE(INT32);
129
+ _NUMPY_UNIFY_PROMOTE(INT64);
130
+ _NUMPY_UNIFY_NOOP(UINT8);
131
+ _NUMPY_UNIFY_PROMOTE(FLOAT32);
132
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
133
+ default:
134
+ return INVALID;
135
+ }
136
+ }
137
+
138
+ int Observe_INT32(PyArray_Descr* descr, int dtype) {
139
+ switch (dtype) {
140
+ _NUMPY_UNIFY_NOOP(INT8);
141
+ _NUMPY_UNIFY_NOOP(INT16);
142
+ _NUMPY_UNIFY_PROMOTE(INT32);
143
+ _NUMPY_UNIFY_PROMOTE(INT64);
144
+ _NUMPY_UNIFY_NOOP(UINT8);
145
+ _NUMPY_UNIFY_NOOP(UINT16);
146
+ _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
147
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
148
+ default:
149
+ return INVALID;
150
+ }
151
+ }
152
+
153
+ int Observe_INT64(PyArray_Descr* descr, int dtype) {
154
+ switch (dtype) {
155
+ _NUMPY_UNIFY_NOOP(INT8);
156
+ _NUMPY_UNIFY_NOOP(INT16);
157
+ _NUMPY_UNIFY_NOOP(INT32);
158
+ _NUMPY_UNIFY_NOOP(INT64);
159
+ _NUMPY_UNIFY_NOOP(UINT8);
160
+ _NUMPY_UNIFY_NOOP(UINT16);
161
+ _NUMPY_UNIFY_NOOP(UINT32);
162
+ _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
163
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
164
+ default:
165
+ return INVALID;
166
+ }
167
+ }
168
+
169
+ int Observe_UINT8(PyArray_Descr* descr, int dtype) {
170
+ switch (dtype) {
171
+ _NUMPY_UNIFY_PROMOTE(UINT16);
172
+ _NUMPY_UNIFY_PROMOTE(UINT32);
173
+ _NUMPY_UNIFY_PROMOTE(UINT64);
174
+ _NUMPY_UNIFY_PROMOTE(FLOAT32);
175
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
176
+ default:
177
+ return INVALID;
178
+ }
179
+ }
180
+
181
+ int Observe_UINT16(PyArray_Descr* descr, int dtype) {
182
+ switch (dtype) {
183
+ _NUMPY_UNIFY_NOOP(UINT8);
184
+ _NUMPY_UNIFY_PROMOTE(UINT32);
185
+ _NUMPY_UNIFY_PROMOTE(UINT64);
186
+ _NUMPY_UNIFY_PROMOTE(FLOAT32);
187
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
188
+ default:
189
+ return INVALID;
190
+ }
191
+ }
192
+
193
+ int Observe_UINT32(PyArray_Descr* descr, int dtype) {
194
+ switch (dtype) {
195
+ _NUMPY_UNIFY_NOOP(UINT8);
196
+ _NUMPY_UNIFY_NOOP(UINT16);
197
+ _NUMPY_UNIFY_PROMOTE(UINT64);
198
+ _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
199
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
200
+ default:
201
+ return INVALID;
202
+ }
203
+ }
204
+
205
+ int Observe_UINT64(PyArray_Descr* descr, int dtype) {
206
+ switch (dtype) {
207
+ _NUMPY_UNIFY_NOOP(UINT8);
208
+ _NUMPY_UNIFY_NOOP(UINT16);
209
+ _NUMPY_UNIFY_NOOP(UINT32);
210
+ _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
211
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
212
+ default:
213
+ return INVALID;
214
+ }
215
+ }
216
+
217
+ int Observe_FLOAT16(PyArray_Descr* descr, int dtype) {
218
+ switch (dtype) {
219
+ _NUMPY_UNIFY_PROMOTE(FLOAT32);
220
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
221
+ default:
222
+ return INVALID;
223
+ }
224
+ }
225
+
226
+ int Observe_FLOAT32(PyArray_Descr* descr, int dtype) {
227
+ switch (dtype) {
228
+ _NUMPY_UNIFY_NOOP(INT8);
229
+ _NUMPY_UNIFY_NOOP(INT16);
230
+ _NUMPY_UNIFY_NOOP(INT32);
231
+ _NUMPY_UNIFY_NOOP(INT64);
232
+ _NUMPY_UNIFY_NOOP(UINT8);
233
+ _NUMPY_UNIFY_NOOP(UINT16);
234
+ _NUMPY_UNIFY_NOOP(UINT32);
235
+ _NUMPY_UNIFY_NOOP(UINT64);
236
+ _NUMPY_UNIFY_PROMOTE(FLOAT64);
237
+ default:
238
+ return INVALID;
239
+ }
240
+ }
241
+
242
+ int Observe_FLOAT64(PyArray_Descr* descr, int dtype) {
243
+ switch (dtype) {
244
+ _NUMPY_UNIFY_NOOP(INT8);
245
+ _NUMPY_UNIFY_NOOP(INT16);
246
+ _NUMPY_UNIFY_NOOP(INT32);
247
+ _NUMPY_UNIFY_NOOP(INT64);
248
+ _NUMPY_UNIFY_NOOP(UINT8);
249
+ _NUMPY_UNIFY_NOOP(UINT16);
250
+ _NUMPY_UNIFY_NOOP(UINT32);
251
+ _NUMPY_UNIFY_NOOP(UINT64);
252
+ default:
253
+ return INVALID;
254
+ }
255
+ }
256
+
257
+ int Observe_DATETIME(PyArray_Descr* dtype_obj) {
258
+ // TODO: check that units are all the same
259
+ return OK;
260
+ }
261
+
262
+ Status Observe(PyArray_Descr* descr) {
263
+ int dtype = fix_numpy_type_num(descr->type_num);
264
+
265
+ if (current_type_num_ == -1) {
266
+ current_dtype_ = descr;
267
+ current_type_num_ = dtype;
268
+ return Status::OK();
269
+ } else if (current_type_num_ == dtype) {
270
+ return Status::OK();
271
+ }
272
+
273
+ #define OBSERVE_CASE(DTYPE) \
274
+ case NPY_##DTYPE: \
275
+ action = Observe_##DTYPE(descr, dtype); \
276
+ break;
277
+
278
+ int action = OK;
279
+ switch (current_type_num_) {
280
+ OBSERVE_CASE(BOOL);
281
+ OBSERVE_CASE(INT8);
282
+ OBSERVE_CASE(INT16);
283
+ OBSERVE_CASE(INT32);
284
+ OBSERVE_CASE(INT64);
285
+ OBSERVE_CASE(UINT8);
286
+ OBSERVE_CASE(UINT16);
287
+ OBSERVE_CASE(UINT32);
288
+ OBSERVE_CASE(UINT64);
289
+ OBSERVE_CASE(FLOAT16);
290
+ OBSERVE_CASE(FLOAT32);
291
+ OBSERVE_CASE(FLOAT64);
292
+ case NPY_DATETIME:
293
+ action = Observe_DATETIME(descr);
294
+ break;
295
+ default:
296
+ return Status::NotImplemented("Unsupported numpy type ", GetNumPyTypeName(dtype));
297
+ }
298
+
299
+ if (action == INVALID) {
300
+ return InvalidMix(dtype);
301
+ }
302
+ return Status::OK();
303
+ }
304
+
305
+ bool dtype_was_observed() const { return current_type_num_ != -1; }
306
+
307
+ PyArray_Descr* current_dtype() const { return current_dtype_; }
308
+
309
+ int current_type_num() const { return current_type_num_; }
310
+
311
+ private:
312
+ int current_type_num_;
313
+ PyArray_Descr* current_dtype_;
314
+ };
315
+
316
+ class TypeInferrer {
317
+ // A type inference visitor for Python values
318
+ public:
319
+ // \param validate_interval the number of elements to observe before checking
320
+ // whether the data is mixed type or has other problems. This helps avoid
321
+ // excess computation for each element while also making sure we "bail out"
322
+ // early with long sequences that may have problems up front
323
+ // \param make_unions permit mixed-type data by creating union types (not yet
324
+ // implemented)
325
+ explicit TypeInferrer(bool pandas_null_sentinels = false,
326
+ int64_t validate_interval = 100, bool make_unions = false)
327
+ : pandas_null_sentinels_(pandas_null_sentinels),
328
+ validate_interval_(validate_interval),
329
+ make_unions_(make_unions),
330
+ total_count_(0),
331
+ none_count_(0),
332
+ bool_count_(0),
333
+ int_count_(0),
334
+ date_count_(0),
335
+ time_count_(0),
336
+ timestamp_micro_count_(0),
337
+ duration_count_(0),
338
+ float_count_(0),
339
+ binary_count_(0),
340
+ unicode_count_(0),
341
+ decimal_count_(0),
342
+ list_count_(0),
343
+ struct_count_(0),
344
+ arrow_scalar_count_(0),
345
+ numpy_dtype_count_(0),
346
+ interval_count_(0),
347
+ max_decimal_metadata_(std::numeric_limits<int32_t>::min(),
348
+ std::numeric_limits<int32_t>::min()),
349
+ decimal_type_() {
350
+ ARROW_CHECK_OK(internal::ImportDecimalType(&decimal_type_));
351
+ ARROW_CHECK_OK(ImportPresentIntervalTypes(&interval_types_));
352
+ }
353
+
354
+ /// \param[in] obj a Python object in the sequence
355
+ /// \param[out] keep_going if sufficient information has been gathered to
356
+ /// attempt to begin converting the sequence, *keep_going will be set to false
357
+ /// to signal to the calling visitor loop to terminate
358
+ Status Visit(PyObject* obj, bool* keep_going) {
359
+ ++total_count_;
360
+
361
+ if (obj == Py_None || (pandas_null_sentinels_ && internal::PandasObjectIsNull(obj))) {
362
+ ++none_count_;
363
+ } else if (PyBool_Check(obj)) {
364
+ ++bool_count_;
365
+ *keep_going = make_unions_;
366
+ } else if (PyFloat_Check(obj)) {
367
+ ++float_count_;
368
+ *keep_going = make_unions_;
369
+ } else if (internal::IsPyInteger(obj)) {
370
+ ++int_count_;
371
+ } else if (PyDateTime_Check(obj)) {
372
+ // infer timezone from the first encountered datetime object
373
+ if (!timestamp_micro_count_) {
374
+ OwnedRef tzinfo(PyObject_GetAttrString(obj, "tzinfo"));
375
+ if (tzinfo.obj() != nullptr && tzinfo.obj() != Py_None) {
376
+ ARROW_ASSIGN_OR_RAISE(timezone_, internal::TzinfoToString(tzinfo.obj()));
377
+ }
378
+ }
379
+ ++timestamp_micro_count_;
380
+ *keep_going = make_unions_;
381
+ } else if (PyDelta_Check(obj)) {
382
+ ++duration_count_;
383
+ *keep_going = make_unions_;
384
+ } else if (PyDate_Check(obj)) {
385
+ ++date_count_;
386
+ *keep_going = make_unions_;
387
+ } else if (PyTime_Check(obj)) {
388
+ ++time_count_;
389
+ *keep_going = make_unions_;
390
+ } else if (internal::IsPyBinary(obj)) {
391
+ ++binary_count_;
392
+ *keep_going = make_unions_;
393
+ } else if (PyUnicode_Check(obj)) {
394
+ ++unicode_count_;
395
+ *keep_going = make_unions_;
396
+ } else if (arrow::py::is_scalar(obj)) {
397
+ RETURN_NOT_OK(VisitArrowScalar(obj, keep_going));
398
+ } else if (PyArray_CheckAnyScalarExact(obj)) {
399
+ RETURN_NOT_OK(VisitDType(PyArray_DescrFromScalar(obj), keep_going));
400
+ } else if (PySet_Check(obj) || (Py_TYPE(obj) == &PyDictValues_Type)) {
401
+ RETURN_NOT_OK(VisitSet(obj, keep_going));
402
+ } else if (PyArray_Check(obj)) {
403
+ RETURN_NOT_OK(VisitNdarray(obj, keep_going));
404
+ } else if (PyDict_Check(obj)) {
405
+ RETURN_NOT_OK(VisitDict(obj));
406
+ } else if (PyList_Check(obj) ||
407
+ (PyTuple_Check(obj) &&
408
+ !PyObject_IsInstance(obj, PyTuple_GetItem(interval_types_.obj(), 0)))) {
409
+ RETURN_NOT_OK(VisitList(obj, keep_going));
410
+ } else if (PyObject_IsInstance(obj, decimal_type_.obj())) {
411
+ RETURN_NOT_OK(max_decimal_metadata_.Update(obj));
412
+ ++decimal_count_;
413
+ } else if (PyObject_IsInstance(obj, interval_types_.obj())) {
414
+ ++interval_count_;
415
+ } else {
416
+ return internal::InvalidValue(obj,
417
+ "did not recognize Python value type when inferring "
418
+ "an Arrow data type");
419
+ }
420
+
421
+ if (total_count_ % validate_interval_ == 0) {
422
+ RETURN_NOT_OK(Validate());
423
+ }
424
+
425
+ return Status::OK();
426
+ }
427
+
428
+ // Infer value type from a sequence of values
429
+ Status VisitSequence(PyObject* obj, PyObject* mask = nullptr) {
430
+ if (mask == nullptr || mask == Py_None) {
431
+ return internal::VisitSequence(
432
+ obj, /*offset=*/0,
433
+ [this](PyObject* value, bool* keep_going) { return Visit(value, keep_going); });
434
+ } else {
435
+ return internal::VisitSequenceMasked(
436
+ obj, mask, /*offset=*/0,
437
+ [this](PyObject* value, uint8_t masked, bool* keep_going) {
438
+ if (!masked) {
439
+ return Visit(value, keep_going);
440
+ } else {
441
+ return Status::OK();
442
+ }
443
+ });
444
+ }
445
+ }
446
+
447
+ // Infer value type from a sequence of values
448
+ Status VisitIterable(PyObject* obj) {
449
+ return internal::VisitIterable(obj, [this](PyObject* value, bool* keep_going) {
450
+ return Visit(value, keep_going);
451
+ });
452
+ }
453
+
454
+ Status GetType(std::shared_ptr<DataType>* out) {
455
+ // TODO(wesm): handle forming unions
456
+ if (make_unions_) {
457
+ return Status::NotImplemented("Creating union types not yet supported");
458
+ }
459
+
460
+ RETURN_NOT_OK(Validate());
461
+
462
+ if (arrow_scalar_count_ > 0 && arrow_scalar_count_ + none_count_ != total_count_) {
463
+ return Status::Invalid(
464
+ "pyarrow scalars cannot be mixed "
465
+ "with other Python scalar values currently");
466
+ }
467
+
468
+ if (numpy_dtype_count_ > 0) {
469
+ // All NumPy scalars and Nones/nulls
470
+ if (numpy_dtype_count_ + none_count_ == total_count_) {
471
+ return NumPyDtypeToArrow(numpy_unifier_.current_dtype()).Value(out);
472
+ }
473
+
474
+ // The "bad path": data contains a mix of NumPy scalars and
475
+ // other kinds of scalars. Note this can happen innocuously
476
+ // because numpy.nan is not a NumPy scalar (it's a built-in
477
+ // PyFloat)
478
+
479
+ // TODO(ARROW-5564): Merge together type unification so this
480
+ // hack is not necessary
481
+ switch (numpy_unifier_.current_type_num()) {
482
+ case NPY_BOOL:
483
+ bool_count_ += numpy_dtype_count_;
484
+ break;
485
+ case NPY_INT8:
486
+ case NPY_INT16:
487
+ case NPY_INT32:
488
+ case NPY_INT64:
489
+ case NPY_UINT8:
490
+ case NPY_UINT16:
491
+ case NPY_UINT32:
492
+ case NPY_UINT64:
493
+ int_count_ += numpy_dtype_count_;
494
+ break;
495
+ case NPY_FLOAT32:
496
+ case NPY_FLOAT64:
497
+ float_count_ += numpy_dtype_count_;
498
+ break;
499
+ case NPY_DATETIME:
500
+ return Status::Invalid(
501
+ "numpy.datetime64 scalars cannot be mixed "
502
+ "with other Python scalar values currently");
503
+ }
504
+ }
505
+
506
+ if (list_count_) {
507
+ std::shared_ptr<DataType> value_type;
508
+ RETURN_NOT_OK(list_inferrer_->GetType(&value_type));
509
+ *out = list(value_type);
510
+ } else if (struct_count_) {
511
+ RETURN_NOT_OK(GetStructType(out));
512
+ } else if (decimal_count_) {
513
+ if (max_decimal_metadata_.precision() > Decimal128Type::kMaxPrecision) {
514
+ // the default constructor does not validate the precision and scale
515
+ ARROW_ASSIGN_OR_RAISE(*out,
516
+ Decimal256Type::Make(max_decimal_metadata_.precision(),
517
+ max_decimal_metadata_.scale()));
518
+ } else {
519
+ ARROW_ASSIGN_OR_RAISE(*out,
520
+ Decimal128Type::Make(max_decimal_metadata_.precision(),
521
+ max_decimal_metadata_.scale()));
522
+ }
523
+ } else if (float_count_) {
524
+ // Prioritize floats before integers
525
+ *out = float64();
526
+ } else if (int_count_) {
527
+ *out = int64();
528
+ } else if (date_count_) {
529
+ *out = date32();
530
+ } else if (time_count_) {
531
+ *out = time64(TimeUnit::MICRO);
532
+ } else if (timestamp_micro_count_) {
533
+ *out = timestamp(TimeUnit::MICRO, timezone_);
534
+ } else if (duration_count_) {
535
+ *out = duration(TimeUnit::MICRO);
536
+ } else if (bool_count_) {
537
+ *out = boolean();
538
+ } else if (binary_count_) {
539
+ *out = binary();
540
+ } else if (unicode_count_) {
541
+ *out = utf8();
542
+ } else if (interval_count_) {
543
+ *out = month_day_nano_interval();
544
+ } else if (arrow_scalar_count_) {
545
+ *out = scalar_type_;
546
+ } else {
547
+ *out = null();
548
+ }
549
+ return Status::OK();
550
+ }
551
+
552
+ int64_t total_count() const { return total_count_; }
553
+
554
+ protected:
555
+ Status Validate() const {
556
+ if (list_count_ > 0) {
557
+ if (list_count_ + none_count_ != total_count_) {
558
+ return Status::Invalid("cannot mix list and non-list, non-null values");
559
+ }
560
+ RETURN_NOT_OK(list_inferrer_->Validate());
561
+ } else if (struct_count_ > 0) {
562
+ if (struct_count_ + none_count_ != total_count_) {
563
+ return Status::Invalid("cannot mix struct and non-struct, non-null values");
564
+ }
565
+ for (const auto& it : struct_inferrers_) {
566
+ RETURN_NOT_OK(it.second.Validate());
567
+ }
568
+ }
569
+ return Status::OK();
570
+ }
571
+
572
+ Status VisitArrowScalar(PyObject* obj, bool* keep_going /* unused */) {
573
+ ARROW_ASSIGN_OR_RAISE(auto scalar, arrow::py::unwrap_scalar(obj));
574
+ // Check that all the scalar types for the sequence are the same
575
+ if (arrow_scalar_count_ > 0 && *scalar->type != *scalar_type_) {
576
+ return internal::InvalidValue(obj, "cannot mix scalars with different types");
577
+ }
578
+ scalar_type_ = scalar->type;
579
+ ++arrow_scalar_count_;
580
+ return Status::OK();
581
+ }
582
+
583
+ Status VisitDType(PyArray_Descr* dtype, bool* keep_going) {
584
+ // Continue visiting dtypes for now.
585
+ // TODO(wesm): devise approach for unions
586
+ ++numpy_dtype_count_;
587
+ *keep_going = true;
588
+ return numpy_unifier_.Observe(dtype);
589
+ }
590
+
591
+ Status VisitList(PyObject* obj, bool* keep_going /* unused */) {
592
+ if (!list_inferrer_) {
593
+ list_inferrer_.reset(
594
+ new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
595
+ }
596
+ ++list_count_;
597
+ return list_inferrer_->VisitSequence(obj);
598
+ }
599
+
600
+ Status VisitSet(PyObject* obj, bool* keep_going /* unused */) {
601
+ if (!list_inferrer_) {
602
+ list_inferrer_.reset(
603
+ new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
604
+ }
605
+ ++list_count_;
606
+ return list_inferrer_->VisitIterable(obj);
607
+ }
608
+
609
+ Status VisitNdarray(PyObject* obj, bool* keep_going) {
610
+ PyArray_Descr* dtype = PyArray_DESCR(reinterpret_cast<PyArrayObject*>(obj));
611
+ if (dtype->type_num == NPY_OBJECT) {
612
+ return VisitList(obj, keep_going);
613
+ }
614
+ // Not an object array: infer child Arrow type from dtype
615
+ if (!list_inferrer_) {
616
+ list_inferrer_.reset(
617
+ new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
618
+ }
619
+ ++list_count_;
620
+
621
+ // XXX(wesm): In ARROW-4324 I added accounting to check whether
622
+ // all of the non-null values have NumPy dtypes, but the
623
+ // total_count was not being properly incremented here
624
+ ++(*list_inferrer_).total_count_;
625
+ return list_inferrer_->VisitDType(dtype, keep_going);
626
+ }
627
+
628
+ Status VisitDict(PyObject* obj) {
629
+ PyObject* key_obj;
630
+ PyObject* value_obj;
631
+ Py_ssize_t pos = 0;
632
+
633
+ while (PyDict_Next(obj, &pos, &key_obj, &value_obj)) {
634
+ std::string key;
635
+ if (PyUnicode_Check(key_obj)) {
636
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(key_obj, &key));
637
+ } else if (PyBytes_Check(key_obj)) {
638
+ key = internal::PyBytes_AsStdString(key_obj);
639
+ } else {
640
+ return Status::TypeError("Expected dict key of type str or bytes, got '",
641
+ Py_TYPE(key_obj)->tp_name, "'");
642
+ }
643
+ // Get or create visitor for this key
644
+ auto it = struct_inferrers_.find(key);
645
+ if (it == struct_inferrers_.end()) {
646
+ it = struct_inferrers_
647
+ .insert(
648
+ std::make_pair(key, TypeInferrer(pandas_null_sentinels_,
649
+ validate_interval_, make_unions_)))
650
+ .first;
651
+ }
652
+ TypeInferrer* visitor = &it->second;
653
+
654
+ // We ignore termination signals from child visitors for now
655
+ //
656
+ // TODO(wesm): keep track of whether type inference has terminated for
657
+ // the child visitors to avoid doing unneeded work
658
+ bool keep_going = true;
659
+ RETURN_NOT_OK(visitor->Visit(value_obj, &keep_going));
660
+ }
661
+
662
+ // We do not terminate visiting dicts since we want the union of all
663
+ // observed keys
664
+ ++struct_count_;
665
+ return Status::OK();
666
+ }
667
+
668
+ Status GetStructType(std::shared_ptr<DataType>* out) {
669
+ std::vector<std::shared_ptr<Field>> fields;
670
+ for (auto&& it : struct_inferrers_) {
671
+ std::shared_ptr<DataType> field_type;
672
+ RETURN_NOT_OK(it.second.GetType(&field_type));
673
+ fields.emplace_back(field(it.first, field_type));
674
+ }
675
+ *out = struct_(fields);
676
+ return Status::OK();
677
+ }
678
+
679
+ private:
680
+ bool pandas_null_sentinels_;
681
+ int64_t validate_interval_;
682
+ bool make_unions_;
683
+ int64_t total_count_;
684
+ int64_t none_count_;
685
+ int64_t bool_count_;
686
+ int64_t int_count_;
687
+ int64_t date_count_;
688
+ int64_t time_count_;
689
+ int64_t timestamp_micro_count_;
690
+ std::string timezone_;
691
+ int64_t duration_count_;
692
+ int64_t float_count_;
693
+ int64_t binary_count_;
694
+ int64_t unicode_count_;
695
+ int64_t decimal_count_;
696
+ int64_t list_count_;
697
+ int64_t struct_count_;
698
+ int64_t arrow_scalar_count_;
699
+ int64_t numpy_dtype_count_;
700
+ int64_t interval_count_;
701
+ std::unique_ptr<TypeInferrer> list_inferrer_;
702
+ std::map<std::string, TypeInferrer> struct_inferrers_;
703
+ std::shared_ptr<DataType> scalar_type_;
704
+
705
+ // If we observe a strongly-typed value in e.g. a NumPy array, we can store
706
+ // it here to skip the type counting logic above
707
+ NumPyDtypeUnifier numpy_unifier_;
708
+
709
+ internal::DecimalMetadata max_decimal_metadata_;
710
+
711
+ OwnedRefNoGIL decimal_type_;
712
+ OwnedRefNoGIL interval_types_;
713
+ };
714
+
715
+ // Non-exhaustive type inference
716
+ Result<std::shared_ptr<DataType>> InferArrowType(PyObject* obj, PyObject* mask,
717
+ bool pandas_null_sentinels) {
718
+ if (pandas_null_sentinels) {
719
+ // ARROW-842: If pandas is not installed then null checks will be less
720
+ // comprehensive, but that is okay.
721
+ internal::InitPandasStaticData();
722
+ }
723
+
724
+ std::shared_ptr<DataType> out_type;
725
+ TypeInferrer inferrer(pandas_null_sentinels);
726
+ RETURN_NOT_OK(inferrer.VisitSequence(obj, mask));
727
+ RETURN_NOT_OK(inferrer.GetType(&out_type));
728
+ if (out_type == nullptr) {
729
+ return Status::TypeError("Unable to determine data type");
730
+ } else {
731
+ return std::move(out_type);
732
+ }
733
+ }
734
+
735
+ ARROW_PYTHON_EXPORT
736
+ bool IsPyBool(PyObject* obj) { return internal::PyBoolScalar_Check(obj); }
737
+
738
+ ARROW_PYTHON_EXPORT
739
+ bool IsPyInt(PyObject* obj) { return internal::PyIntScalar_Check(obj); }
740
+
741
+ ARROW_PYTHON_EXPORT
742
+ bool IsPyFloat(PyObject* obj) { return internal::PyFloatScalar_Check(obj); }
743
+
744
+ } // namespace py
745
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.h ADDED
@@ -0,0 +1,64 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between CPython built-in data structures and Arrow
19
+ // data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+
27
+ #include "arrow/python/visibility.h"
28
+ #include "arrow/type.h"
29
+ #include "arrow/util/macros.h"
30
+
31
+ #include "common.h"
32
+
33
+ namespace arrow {
34
+
35
+ class Array;
36
+ class Status;
37
+
38
+ namespace py {
39
+
40
+ // These functions take a sequence input, not arbitrary iterables
41
+
42
+ /// \brief Infer Arrow type from a Python sequence
43
+ /// \param[in] obj the sequence of values
44
+ /// \param[in] mask an optional mask where True values are null. May
45
+ /// be nullptr
46
+ /// \param[in] pandas_null_sentinels use pandas's null value markers
47
+ ARROW_PYTHON_EXPORT
48
+ Result<std::shared_ptr<arrow::DataType>> InferArrowType(PyObject* obj, PyObject* mask,
49
+ bool pandas_null_sentinels);
50
+
51
+ /// Checks whether the passed Python object is a boolean scalar
52
+ ARROW_PYTHON_EXPORT
53
+ bool IsPyBool(PyObject* obj);
54
+
55
+ /// Checks whether the passed Python object is an integer scalar
56
+ ARROW_PYTHON_EXPORT
57
+ bool IsPyInt(PyObject* obj);
58
+
59
+ /// Checks whether the passed Python object is a float scalar
60
+ ARROW_PYTHON_EXPORT
61
+ bool IsPyFloat(PyObject* obj);
62
+
63
+ } // namespace py
64
+ } // namespace arrow
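
A minimal sketch of how the inference API declared above might be driven from embedding code, assuming the interpreter is initialized, the GIL is held, and ARROW_ASSIGN_OR_RAISE comes from arrow/result.h; the helper name is illustrative, and with a float present the inferrer is expected to settle on float64 (floats are prioritized over integers):

#include <memory>

#include "arrow/python/inference.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status InferExample() {
  // Build the Python list [1, 2, 3.5]; spaces and commas are ignored in the format string.
  PyObject* seq = Py_BuildValue("[i,i,d]", 1, 2, 3.5);
  if (seq == nullptr) {
    return arrow::Status::Invalid("could not build Python list");
  }
  ARROW_ASSIGN_OR_RAISE(
      std::shared_ptr<arrow::DataType> type,
      arrow::py::InferArrowType(seq, /*mask=*/nullptr,
                                /*pandas_null_sentinels=*/false));
  Py_DECREF(seq);
  // Expected: type->ToString() reports "double" for this input.
  return arrow::Status::OK();
}
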
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.cc ADDED
@@ -0,0 +1,24 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Trigger the array import (inversion of NO_IMPORT_ARRAY)
19
+ #define NUMPY_IMPORT_ARRAY
20
+
21
+ #include "arrow/python/init.h"
22
+ #include "arrow/python/numpy_interop.h"
23
+
24
+ int arrow_init_numpy() { return arrow::py::import_numpy(); }
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.h ADDED
@@ -0,0 +1,26 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+ #include "arrow/python/visibility.h"
22
+
23
+ extern "C" {
24
+ ARROW_PYTHON_EXPORT
25
+ int arrow_init_numpy();
26
+ }
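
A small sketch of calling the exported initializer, assuming it follows the usual NumPy C-API convention of returning a negative value and leaving a Python exception set when the import fails:

#include "arrow/python/init.h"

// Returns true when NumPy's C API was imported successfully.
bool InitArrowNumPy() {
  if (arrow_init_numpy() < 0) {
    PyErr_Print();  // failure details are in the Python error indicator
    return false;
  }
  return true;
}
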
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.cc ADDED
@@ -0,0 +1,387 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "io.h"
19
+
20
+ #include <cstdint>
21
+ #include <cstdlib>
22
+ #include <memory>
23
+ #include <mutex>
24
+ #include <string>
25
+
26
+ #include "arrow/io/memory.h"
27
+ #include "arrow/memory_pool.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/util/logging.h"
30
+
31
+ #include "arrow/python/common.h"
32
+ #include "arrow/python/pyarrow.h"
33
+
34
+ namespace arrow {
35
+
36
+ using arrow::io::TransformInputStream;
37
+
38
+ namespace py {
39
+
40
+ // ----------------------------------------------------------------------
41
+ // Python file
42
+
43
+ // A common interface to a Python file-like object. Must acquire GIL before
44
+ // calling any methods
45
+ class PythonFile {
46
+ public:
47
+ explicit PythonFile(PyObject* file) : file_(file), checked_read_buffer_(false) {
48
+ Py_INCREF(file);
49
+ }
50
+
51
+ Status CheckClosed() const {
52
+ if (!file_) {
53
+ return Status::Invalid("operation on closed Python file");
54
+ }
55
+ return Status::OK();
56
+ }
57
+
58
+ Status Close() {
59
+ if (file_) {
60
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "close", "()");
61
+ Py_XDECREF(result);
62
+ file_.reset();
63
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
64
+ }
65
+ return Status::OK();
66
+ }
67
+
68
+ Status Abort() {
69
+ file_.reset();
70
+ return Status::OK();
71
+ }
72
+
73
+ bool closed() const {
74
+ if (!file_) {
75
+ return true;
76
+ }
77
+ PyObject* result = PyObject_GetAttrString(file_.obj(), "closed");
78
+ if (result == NULL) {
79
+ // Can't propagate the error, so write it out and return an arbitrary value
80
+ PyErr_WriteUnraisable(NULL);
81
+ return true;
82
+ }
83
+ int ret = PyObject_IsTrue(result);
84
+ Py_XDECREF(result);
85
+ if (ret < 0) {
86
+ PyErr_WriteUnraisable(NULL);
87
+ return true;
88
+ }
89
+ return ret != 0;
90
+ }
91
+
92
+ Status Seek(int64_t position, int whence) {
93
+ RETURN_NOT_OK(CheckClosed());
94
+
95
+ // NOTE: `long long` is at least 64 bits in the C standard, the cast below is
96
+ // therefore safe.
97
+
98
+ // whence: 0 for relative to start of file, 2 for end of file
99
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "seek", "(Li)",
100
+ static_cast<long long>(position), whence);
101
+ Py_XDECREF(result);
102
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
103
+ return Status::OK();
104
+ }
105
+
106
+ Status Read(int64_t nbytes, PyObject** out) {
107
+ RETURN_NOT_OK(CheckClosed());
108
+
109
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "read", "(L)",
110
+ static_cast<long long>(nbytes));
111
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
112
+ *out = result;
113
+ return Status::OK();
114
+ }
115
+
116
+ Status ReadBuffer(int64_t nbytes, PyObject** out) {
117
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "read_buffer", "(L)",
118
+ static_cast<long long>(nbytes));
119
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
120
+ *out = result;
121
+ return Status::OK();
122
+ }
123
+
124
+ Status Write(const void* data, int64_t nbytes) {
125
+ RETURN_NOT_OK(CheckClosed());
126
+
127
+ // Since the data isn't owned, we have to make a copy
128
+ PyObject* py_data =
129
+ PyBytes_FromStringAndSize(reinterpret_cast<const char*>(data), nbytes);
130
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
131
+
132
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "write", "(O)", py_data);
133
+ Py_XDECREF(py_data);
134
+ Py_XDECREF(result);
135
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
136
+ return Status::OK();
137
+ }
138
+
139
+ Status Write(const std::shared_ptr<Buffer>& buffer) {
140
+ RETURN_NOT_OK(CheckClosed());
141
+
142
+ PyObject* py_data = wrap_buffer(buffer);
143
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
144
+
145
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "write", "(O)", py_data);
146
+ Py_XDECREF(py_data);
147
+ Py_XDECREF(result);
148
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
149
+ return Status::OK();
150
+ }
151
+
152
+ Result<int64_t> Tell() {
153
+ RETURN_NOT_OK(CheckClosed());
154
+
155
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "tell", "()");
156
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
157
+
158
+ int64_t position = PyLong_AsLongLong(result);
159
+ Py_DECREF(result);
160
+
161
+ // PyLong_AsLongLong can raise OverflowError
162
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
163
+ return position;
164
+ }
165
+
166
+ std::mutex& lock() { return lock_; }
167
+
168
+ bool HasReadBuffer() {
169
+ if (!checked_read_buffer_) { // we don't want to check this each time
170
+ has_read_buffer_ = PyObject_HasAttrString(file_.obj(), "read_buffer") == 1;
171
+ checked_read_buffer_ = true;
172
+ }
173
+ return has_read_buffer_;
174
+ }
175
+
176
+ private:
177
+ std::mutex lock_;
178
+ OwnedRefNoGIL file_;
179
+ bool has_read_buffer_;
180
+ bool checked_read_buffer_;
181
+ };
182
+
183
+ // ----------------------------------------------------------------------
184
+ // Seekable input stream
185
+
186
+ PyReadableFile::PyReadableFile(PyObject* file) { file_.reset(new PythonFile(file)); }
187
+
188
+ // The destructor does not close the underlying Python file object, as
189
+ // there may be multiple references to it. Instead let the Python
190
+ // destructor do its job.
191
+ PyReadableFile::~PyReadableFile() {}
192
+
193
+ Status PyReadableFile::Abort() {
194
+ return SafeCallIntoPython([this]() { return file_->Abort(); });
195
+ }
196
+
197
+ Status PyReadableFile::Close() {
198
+ return SafeCallIntoPython([this]() { return file_->Close(); });
199
+ }
200
+
201
+ bool PyReadableFile::closed() const {
202
+ bool res;
203
+ Status st = SafeCallIntoPython([this, &res]() {
204
+ res = file_->closed();
205
+ return Status::OK();
206
+ });
207
+ return res;
208
+ }
209
+
210
+ Status PyReadableFile::Seek(int64_t position) {
211
+ return SafeCallIntoPython([=] { return file_->Seek(position, 0); });
212
+ }
213
+
214
+ Result<int64_t> PyReadableFile::Tell() const {
215
+ return SafeCallIntoPython([=]() -> Result<int64_t> { return file_->Tell(); });
216
+ }
217
+
218
+ Result<int64_t> PyReadableFile::Read(int64_t nbytes, void* out) {
219
+ return SafeCallIntoPython([=]() -> Result<int64_t> {
220
+ OwnedRef bytes;
221
+ RETURN_NOT_OK(file_->Read(nbytes, bytes.ref()));
222
+ PyObject* bytes_obj = bytes.obj();
223
+ DCHECK(bytes_obj != NULL);
224
+
225
+ Py_buffer py_buf;
226
+ if (!PyObject_GetBuffer(bytes_obj, &py_buf, PyBUF_ANY_CONTIGUOUS)) {
227
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(py_buf.buf);
228
+ std::memcpy(out, data, py_buf.len);
229
+ int64_t len = py_buf.len;
230
+ PyBuffer_Release(&py_buf);
231
+ return len;
232
+ } else {
233
+ return Status::TypeError(
234
+ "Python file read() should have returned a bytes object or an object "
235
+ "supporting the buffer protocol, got '",
236
+ Py_TYPE(bytes_obj)->tp_name, "' (did you open the file in binary mode?)");
237
+ }
238
+ });
239
+ }
240
+
241
+ Result<std::shared_ptr<Buffer>> PyReadableFile::Read(int64_t nbytes) {
242
+ return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
243
+ OwnedRef buffer_obj;
244
+ if (file_->HasReadBuffer()) {
245
+ RETURN_NOT_OK(file_->ReadBuffer(nbytes, buffer_obj.ref()));
246
+ } else {
247
+ RETURN_NOT_OK(file_->Read(nbytes, buffer_obj.ref()));
248
+ }
249
+ DCHECK(buffer_obj.obj() != NULL);
250
+
251
+ return PyBuffer::FromPyObject(buffer_obj.obj());
252
+ });
253
+ }
254
+
255
+ Result<int64_t> PyReadableFile::ReadAt(int64_t position, int64_t nbytes, void* out) {
256
+ std::lock_guard<std::mutex> guard(file_->lock());
257
+ return SafeCallIntoPython([=]() -> Result<int64_t> {
258
+ RETURN_NOT_OK(Seek(position));
259
+ return Read(nbytes, out);
260
+ });
261
+ }
262
+
263
+ Result<std::shared_ptr<Buffer>> PyReadableFile::ReadAt(int64_t position, int64_t nbytes) {
264
+ std::lock_guard<std::mutex> guard(file_->lock());
265
+ return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
266
+ RETURN_NOT_OK(Seek(position));
267
+ return Read(nbytes);
268
+ });
269
+ }
270
+
271
+ Result<int64_t> PyReadableFile::GetSize() {
272
+ return SafeCallIntoPython([=]() -> Result<int64_t> {
273
+ ARROW_ASSIGN_OR_RAISE(int64_t current_position, file_->Tell());
274
+ RETURN_NOT_OK(file_->Seek(0, 2));
275
+
276
+ ARROW_ASSIGN_OR_RAISE(int64_t file_size, file_->Tell());
277
+ // Restore previous file position
278
+ RETURN_NOT_OK(file_->Seek(current_position, 0));
279
+
280
+ return file_size;
281
+ });
282
+ }
283
+
284
+ // ----------------------------------------------------------------------
285
+ // Output stream
286
+
287
+ PyOutputStream::PyOutputStream(PyObject* file) : position_(0) {
288
+ file_.reset(new PythonFile(file));
289
+ }
290
+
291
+ // The destructor does not close the underlying Python file object, as
292
+ // there may be multiple references to it. Instead let the Python
293
+ // destructor do its job.
294
+ PyOutputStream::~PyOutputStream() {}
295
+
296
+ Status PyOutputStream::Abort() {
297
+ return SafeCallIntoPython([=]() { return file_->Abort(); });
298
+ }
299
+
300
+ Status PyOutputStream::Close() {
301
+ return SafeCallIntoPython([=]() { return file_->Close(); });
302
+ }
303
+
304
+ bool PyOutputStream::closed() const {
305
+ bool res;
306
+ Status st = SafeCallIntoPython([this, &res]() {
307
+ res = file_->closed();
308
+ return Status::OK();
309
+ });
310
+ return res;
311
+ }
312
+
313
+ Result<int64_t> PyOutputStream::Tell() const { return position_; }
314
+
315
+ Status PyOutputStream::Write(const void* data, int64_t nbytes) {
316
+ return SafeCallIntoPython([=]() {
317
+ position_ += nbytes;
318
+ return file_->Write(data, nbytes);
319
+ });
320
+ }
321
+
322
+ Status PyOutputStream::Write(const std::shared_ptr<Buffer>& buffer) {
323
+ return SafeCallIntoPython([=]() {
324
+ position_ += buffer->size();
325
+ return file_->Write(buffer);
326
+ });
327
+ }
328
+
329
+ // ----------------------------------------------------------------------
330
+ // Foreign buffer
331
+
332
+ Status PyForeignBuffer::Make(const uint8_t* data, int64_t size, PyObject* base,
333
+ std::shared_ptr<Buffer>* out) {
334
+ PyForeignBuffer* buf = new PyForeignBuffer(data, size, base);
335
+ if (buf == NULL) {
336
+ return Status::OutOfMemory("could not allocate foreign buffer object");
337
+ } else {
338
+ *out = std::shared_ptr<Buffer>(buf);
339
+ return Status::OK();
340
+ }
341
+ }
342
+
343
+ // ----------------------------------------------------------------------
344
+ // TransformInputStream::TransformFunc wrapper
345
+
346
+ struct TransformFunctionWrapper {
347
+ TransformFunctionWrapper(TransformCallback cb, PyObject* arg)
348
+ : cb_(std::move(cb)), arg_(std::make_shared<OwnedRefNoGIL>(arg)) {
349
+ Py_INCREF(arg);
350
+ }
351
+
352
+ Result<std::shared_ptr<Buffer>> operator()(const std::shared_ptr<Buffer>& src) {
353
+ return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
354
+ std::shared_ptr<Buffer> dest;
355
+ cb_(arg_->obj(), src, &dest);
356
+ RETURN_NOT_OK(CheckPyError());
357
+ return dest;
358
+ });
359
+ }
360
+
361
+ protected:
362
+ // Need to wrap OwnedRefNoGIL because std::function needs the callable
363
+ // to be copy-constructible...
364
+ TransformCallback cb_;
365
+ std::shared_ptr<OwnedRefNoGIL> arg_;
366
+ };
367
+
368
+ std::shared_ptr<::arrow::io::InputStream> MakeTransformInputStream(
369
+ std::shared_ptr<::arrow::io::InputStream> wrapped, TransformInputStreamVTable vtable,
370
+ PyObject* handler) {
371
+ TransformInputStream::TransformFunc transform(
372
+ TransformFunctionWrapper{std::move(vtable.transform), handler});
373
+ return std::make_shared<TransformInputStream>(std::move(wrapped), std::move(transform));
374
+ }
375
+
376
+ std::shared_ptr<StreamWrapFunc> MakeStreamTransformFunc(TransformInputStreamVTable vtable,
377
+ PyObject* handler) {
378
+ TransformInputStream::TransformFunc transform(
379
+ TransformFunctionWrapper{std::move(vtable.transform), handler});
380
+ StreamWrapFunc func = [transform](std::shared_ptr<::arrow::io::InputStream> wrapped) {
381
+ return std::make_shared<TransformInputStream>(wrapped, transform);
382
+ };
383
+ return std::make_shared<StreamWrapFunc>(func);
384
+ }
385
+
386
+ } // namespace py
387
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.h ADDED
@@ -0,0 +1,121 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "arrow/io/interfaces.h"
23
+ #include "arrow/io/transform.h"
24
+
25
+ #include "arrow/python/common.h"
26
+ #include "arrow/python/visibility.h"
27
+
28
+ namespace arrow {
29
+ namespace py {
30
+
31
+ class ARROW_NO_EXPORT PythonFile;
32
+
33
+ class ARROW_PYTHON_EXPORT PyReadableFile : public io::RandomAccessFile {
34
+ public:
35
+ explicit PyReadableFile(PyObject* file);
36
+ ~PyReadableFile() override;
37
+
38
+ Status Close() override;
39
+ Status Abort() override;
40
+ bool closed() const override;
41
+
42
+ Result<int64_t> Read(int64_t nbytes, void* out) override;
43
+ Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
44
+
45
+ // Thread-safe version
46
+ Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out) override;
47
+
48
+ // Thread-safe version
49
+ Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes) override;
50
+
51
+ Result<int64_t> GetSize() override;
52
+
53
+ Status Seek(int64_t position) override;
54
+
55
+ Result<int64_t> Tell() const override;
56
+
57
+ private:
58
+ std::unique_ptr<PythonFile> file_;
59
+ };
60
+
61
+ class ARROW_PYTHON_EXPORT PyOutputStream : public io::OutputStream {
62
+ public:
63
+ explicit PyOutputStream(PyObject* file);
64
+ ~PyOutputStream() override;
65
+
66
+ Status Close() override;
67
+ Status Abort() override;
68
+ bool closed() const override;
69
+ Result<int64_t> Tell() const override;
70
+ Status Write(const void* data, int64_t nbytes) override;
71
+ Status Write(const std::shared_ptr<Buffer>& buffer) override;
72
+
73
+ private:
74
+ std::unique_ptr<PythonFile> file_;
75
+ int64_t position_;
76
+ };
77
+
78
+ // TODO(wesm): seekable output files
79
+
80
+ // A Buffer subclass that keeps a PyObject reference throughout its
81
+ // lifetime, such that the Python object is kept alive as long as the
82
+ // C++ buffer is still needed.
83
+ // Keeping the reference in a Python wrapper would be incorrect as
84
+ // the Python wrapper can get destroyed even though the wrapped C++
85
+ // buffer is still alive (ARROW-2270).
86
+ class ARROW_PYTHON_EXPORT PyForeignBuffer : public Buffer {
87
+ public:
88
+ static Status Make(const uint8_t* data, int64_t size, PyObject* base,
89
+ std::shared_ptr<Buffer>* out);
90
+
91
+ private:
92
+ PyForeignBuffer(const uint8_t* data, int64_t size, PyObject* base)
93
+ : Buffer(data, size) {
94
+ Py_INCREF(base);
95
+ base_.reset(base);
96
+ }
97
+
98
+ OwnedRefNoGIL base_;
99
+ };
100
+
101
+ // All this rigamarole because Cython is really poor with std::function<>
102
+
103
+ using TransformCallback = std::function<void(
104
+ PyObject*, const std::shared_ptr<Buffer>& src, std::shared_ptr<Buffer>* out)>;
105
+
106
+ struct TransformInputStreamVTable {
107
+ TransformCallback transform;
108
+ };
109
+
110
+ ARROW_PYTHON_EXPORT
111
+ std::shared_ptr<::arrow::io::InputStream> MakeTransformInputStream(
112
+ std::shared_ptr<::arrow::io::InputStream> wrapped, TransformInputStreamVTable vtable,
113
+ PyObject* arg);
114
+
115
+ using StreamWrapFunc = std::function<Result<std::shared_ptr<io::InputStream>>(
116
+ std::shared_ptr<io::InputStream>)>;
117
+ ARROW_PYTHON_EXPORT
118
+ std::shared_ptr<StreamWrapFunc> MakeStreamTransformFunc(TransformInputStreamVTable vtable,
119
+ PyObject* handler);
120
+ } // namespace py
121
+ } // namespace arrow
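
A rough sketch of wrapping an in-memory Python file object with PyReadableFile and reading it back through the Arrow interface, assuming the GIL is held by the caller, that arrow/python/io.h transitively provides the CPython declarations, and that minimal error handling is acceptable for illustration:

#include <memory>

#include "arrow/buffer.h"
#include "arrow/python/io.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status ReadThroughPython() {
  PyObject* io_mod = PyImport_ImportModule("io");
  if (io_mod == nullptr) return arrow::Status::IOError("could not import io");
  PyObject* bytes_io = PyObject_CallMethod(io_mod, "BytesIO", "(y)", "hello arrow");
  Py_DECREF(io_mod);
  if (bytes_io == nullptr) return arrow::Status::IOError("could not create BytesIO");

  arrow::py::PyReadableFile file(bytes_io);
  Py_DECREF(bytes_io);  // the wrapper took its own reference in the constructor

  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf, file.Read(5));
  // buf now holds the first five bytes ("hello").
  return file.Close();
}
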
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/ipc.cc ADDED
@@ -0,0 +1,133 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include "ipc.h"
+
+ #include <memory>
+
+ #include "arrow/compute/cast.h"
+ #include "arrow/python/pyarrow.h"
+
+ namespace arrow {
+ namespace py {
+
+ PyRecordBatchReader::PyRecordBatchReader() {}
+
+ Status PyRecordBatchReader::Init(std::shared_ptr<Schema> schema, PyObject* iterable) {
+   schema_ = std::move(schema);
+
+   iterator_.reset(PyObject_GetIter(iterable));
+   return CheckPyError();
+ }
+
+ std::shared_ptr<Schema> PyRecordBatchReader::schema() const { return schema_; }
+
+ Status PyRecordBatchReader::ReadNext(std::shared_ptr<RecordBatch>* batch) {
+   PyAcquireGIL lock;
+
+   if (!iterator_) {
+     // End of stream
+     batch->reset();
+     return Status::OK();
+   }
+
+   OwnedRef py_batch(PyIter_Next(iterator_.obj()));
+   if (!py_batch) {
+     RETURN_IF_PYERROR();
+     // End of stream
+     batch->reset();
+     iterator_.reset();
+     return Status::OK();
+   }
+
+   return unwrap_batch(py_batch.obj()).Value(batch);
+ }
+
+ Result<std::shared_ptr<RecordBatchReader>> PyRecordBatchReader::Make(
+     std::shared_ptr<Schema> schema, PyObject* iterable) {
+   auto reader = std::shared_ptr<PyRecordBatchReader>(new PyRecordBatchReader());
+   RETURN_NOT_OK(reader->Init(std::move(schema), iterable));
+   return reader;
+ }
+
+ CastingRecordBatchReader::CastingRecordBatchReader() = default;
+
+ Status CastingRecordBatchReader::Init(std::shared_ptr<RecordBatchReader> parent,
+                                       std::shared_ptr<Schema> schema) {
+   std::shared_ptr<Schema> src = parent->schema();
+
+   // The check for names has already been done in Python where it's easier to
+   // generate a nice error message.
+   int num_fields = schema->num_fields();
+   if (src->num_fields() != num_fields) {
+     return Status::Invalid("Number of fields not equal");
+   }
+
+   // Ensure all columns can be cast before succeeding
+   for (int i = 0; i < num_fields; i++) {
+     if (!compute::CanCast(*src->field(i)->type(), *schema->field(i)->type())) {
+       return Status::TypeError("Field ", i, " cannot be cast from ",
+                                src->field(i)->type()->ToString(), " to ",
+                                schema->field(i)->type()->ToString());
+     }
+   }
+
+   parent_ = std::move(parent);
+   schema_ = std::move(schema);
+
+   return Status::OK();
+ }
+
+ std::shared_ptr<Schema> CastingRecordBatchReader::schema() const { return schema_; }
+
+ Status CastingRecordBatchReader::ReadNext(std::shared_ptr<RecordBatch>* batch) {
+   std::shared_ptr<RecordBatch> out;
+   ARROW_RETURN_NOT_OK(parent_->ReadNext(&out));
+   if (!out) {
+     batch->reset();
+     return Status::OK();
+   }
+
+   auto num_columns = out->num_columns();
+   auto options = compute::CastOptions::Safe();
+   ArrayVector columns(num_columns);
+   for (int i = 0; i < num_columns; i++) {
+     const Array& src = *out->column(i);
+     if (!schema_->field(i)->nullable() && src.null_count() > 0) {
+       return Status::Invalid(
+           "Can't cast array that contains nulls to non-nullable field at index ", i);
+     }
+
+     ARROW_ASSIGN_OR_RAISE(columns[i],
+                           compute::Cast(src, schema_->field(i)->type(), options));
+   }
+
+   *batch = RecordBatch::Make(schema_, out->num_rows(), std::move(columns));
+   return Status::OK();
+ }
+
+ Result<std::shared_ptr<RecordBatchReader>> CastingRecordBatchReader::Make(
+     std::shared_ptr<RecordBatchReader> parent, std::shared_ptr<Schema> schema) {
+   auto reader = std::shared_ptr<CastingRecordBatchReader>(new CastingRecordBatchReader());
+   ARROW_RETURN_NOT_OK(reader->Init(parent, schema));
+   return reader;
+ }
+
+ Status CastingRecordBatchReader::Close() { return parent_->Close(); }
+
+ }  // namespace py
+ }  // namespace arrow
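Editor's note: on the Python side, `pyarrow.RecordBatchReader.from_batches` is the public entry point that hands an iterable of batches to this reader; each ReadNext() above pulls one item from the Python iterator under the GIL and unwraps it. A minimal sketch (the generator below is purely illustrative):

import pyarrow as pa

schema = pa.schema([("x", pa.int64())])

def batches():
    # Hypothetical data source; any iterable of RecordBatch objects works.
    for start in (0, 2, 4):
        yield pa.record_batch([pa.array([start, start + 1], type=pa.int64())],
                              schema=schema)

reader = pa.RecordBatchReader.from_batches(schema, batches())
assert reader.read_all().num_rows == 6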
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/ipc.h ADDED
@@ -0,0 +1,72 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+
+ #include "arrow/python/common.h"
+ #include "arrow/python/visibility.h"
+ #include "arrow/record_batch.h"
+ #include "arrow/result.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace py {
+
+ class ARROW_PYTHON_EXPORT PyRecordBatchReader : public RecordBatchReader {
+  public:
+   std::shared_ptr<Schema> schema() const override;
+
+   Status ReadNext(std::shared_ptr<RecordBatch>* batch) override;
+
+   // For use from Cython
+   // Assumes that `iterable` is borrowed
+   static Result<std::shared_ptr<RecordBatchReader>> Make(std::shared_ptr<Schema>,
+                                                          PyObject* iterable);
+
+  protected:
+   PyRecordBatchReader();
+
+   Status Init(std::shared_ptr<Schema>, PyObject* iterable);
+
+   std::shared_ptr<Schema> schema_;
+   OwnedRefNoGIL iterator_;
+ };
+
+ class ARROW_PYTHON_EXPORT CastingRecordBatchReader : public RecordBatchReader {
+  public:
+   std::shared_ptr<Schema> schema() const override;
+
+   Status ReadNext(std::shared_ptr<RecordBatch>* batch) override;
+
+   static Result<std::shared_ptr<RecordBatchReader>> Make(
+       std::shared_ptr<RecordBatchReader> parent, std::shared_ptr<Schema> schema);
+
+   Status Close() override;
+
+  protected:
+   CastingRecordBatchReader();
+
+   Status Init(std::shared_ptr<RecordBatchReader> parent, std::shared_ptr<Schema> schema);
+
+   std::shared_ptr<RecordBatchReader> parent_;
+   std::shared_ptr<Schema> schema_;
+ };
+
+ }  // namespace py
+ }  // namespace arrow
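Editor's note: CastingRecordBatchReader is surfaced in Python as `RecordBatchReader.cast` in recent pyarrow releases (an assumption worth checking against your installed version). Conceptually it rejects incompatible schemas up front via compute::CanCast and casts each batch lazily as it is read, as in this sketch:

import pyarrow as pa

src_schema = pa.schema([("x", pa.int32())])
batch = pa.record_batch([pa.array([1, 2, 3], type=pa.int32())], schema=src_schema)
reader = pa.RecordBatchReader.from_batches(src_schema, iter([batch]))

# Assumption: RecordBatchReader.cast() is available; it is the Python surface
# of the CastingRecordBatchReader declared above.
target = pa.schema([("x", pa.int64())])
casted = reader.cast(target)
assert casted.schema == target
assert casted.read_all()["x"].type == pa.int64()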
llmeval-env/lib/python3.10/site-packages/pyarrow/src/arrow/python/iterators.h ADDED
@@ -0,0 +1,194 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <utility>
+
+ #include "arrow/array/array_primitive.h"
+
+ #include "arrow/python/common.h"
+ #include "arrow/python/numpy_internal.h"
+
+ namespace arrow {
+ namespace py {
+ namespace internal {
+
+ using arrow::internal::checked_cast;
+
+ // Visit the Python sequence, calling the given callable on each element. If
+ // the callable returns a non-OK status, iteration stops and the status is
+ // returned.
+ //
+ // The call signature for Visitor must be
+ //
+ //   Visit(PyObject* obj, int64_t index, bool* keep_going)
+ //
+ // If keep_going is set to false, the iteration terminates
+ template <class VisitorFunc>
+ inline Status VisitSequenceGeneric(PyObject* obj, int64_t offset, VisitorFunc&& func) {
+   // VisitorFunc may set to false to terminate iteration
+   bool keep_going = true;
+
+   if (PyArray_Check(obj)) {
+     PyArrayObject* arr_obj = reinterpret_cast<PyArrayObject*>(obj);
+     if (PyArray_NDIM(arr_obj) != 1) {
+       return Status::Invalid("Only 1D arrays accepted");
+     }
+
+     if (PyArray_DESCR(arr_obj)->type_num == NPY_OBJECT) {
+       // It's an array object, we can fetch object pointers directly
+       const Ndarray1DIndexer<PyObject*> objects(arr_obj);
+       for (int64_t i = offset; keep_going && i < objects.size(); ++i) {
+         RETURN_NOT_OK(func(objects[i], i, &keep_going));
+       }
+       return Status::OK();
+     }
+     // It's a non-object array, fall back on regular sequence access.
+     // (note PyArray_GETITEM() is slightly different: it returns standard
+     // Python types, not Numpy scalar types)
+     // This code path is inefficient: callers should implement dedicated
+     // logic for non-object arrays.
+   }
+   if (PySequence_Check(obj)) {
+     if (PyList_Check(obj) || PyTuple_Check(obj)) {
+       // Use fast item access
+       const Py_ssize_t size = PySequence_Fast_GET_SIZE(obj);
+       for (Py_ssize_t i = offset; keep_going && i < size; ++i) {
+         PyObject* value = PySequence_Fast_GET_ITEM(obj, i);
+         RETURN_NOT_OK(func(value, static_cast<int64_t>(i), &keep_going));
+       }
+     } else {
+       // Regular sequence: avoid making a potentially large copy
+       const Py_ssize_t size = PySequence_Size(obj);
+       RETURN_IF_PYERROR();
+       for (Py_ssize_t i = offset; keep_going && i < size; ++i) {
+         OwnedRef value_ref(PySequence_ITEM(obj, i));
+         RETURN_IF_PYERROR();
+         RETURN_NOT_OK(func(value_ref.obj(), static_cast<int64_t>(i), &keep_going));
+       }
+     }
+   } else {
+     return Status::TypeError("Object is not a sequence");
+   }
+   return Status::OK();
+ }
+
+ // Visit sequence with no null mask
+ template <class VisitorFunc>
+ inline Status VisitSequence(PyObject* obj, int64_t offset, VisitorFunc&& func) {
+   return VisitSequenceGeneric(
+       obj, offset, [&func](PyObject* value, int64_t i /* unused */, bool* keep_going) {
+         return func(value, keep_going);
+       });
+ }
+
+ /// Visit sequence with null mask
+ template <class VisitorFunc>
+ inline Status VisitSequenceMasked(PyObject* obj, PyObject* mo, int64_t offset,
+                                   VisitorFunc&& func) {
+   if (PyArray_Check(mo)) {
+     PyArrayObject* mask = reinterpret_cast<PyArrayObject*>(mo);
+     if (PyArray_NDIM(mask) != 1) {
+       return Status::Invalid("Mask must be 1D array");
+     }
+     if (PyArray_SIZE(mask) != static_cast<int64_t>(PySequence_Size(obj))) {
+       return Status::Invalid("Mask was a different length from sequence being converted");
+     }
+
+     const int dtype = fix_numpy_type_num(PyArray_DESCR(mask)->type_num);
+     if (dtype == NPY_BOOL) {
+       Ndarray1DIndexer<uint8_t> mask_values(mask);
+
+       return VisitSequenceGeneric(
+           obj, offset,
+           [&func, &mask_values](PyObject* value, int64_t i, bool* keep_going) {
+             return func(value, mask_values[i], keep_going);
+           });
+     } else {
+       return Status::TypeError("Mask must be boolean dtype");
+     }
+   } else if (py::is_array(mo)) {
+     auto unwrap_mask_result = unwrap_array(mo);
+     ARROW_RETURN_NOT_OK(unwrap_mask_result);
+     std::shared_ptr<Array> mask_ = unwrap_mask_result.ValueOrDie();
+     if (mask_->type_id() != Type::type::BOOL) {
+       return Status::TypeError("Mask must be an array of booleans");
+     }
+
+     if (mask_->length() != PySequence_Size(obj)) {
+       return Status::Invalid("Mask was a different length from sequence being converted");
+     }
+
+     if (mask_->null_count() != 0) {
+       return Status::TypeError("Mask must be an array of booleans");
+     }
+
+     BooleanArray* boolmask = checked_cast<BooleanArray*>(mask_.get());
+     return VisitSequenceGeneric(
+         obj, offset, [&func, &boolmask](PyObject* value, int64_t i, bool* keep_going) {
+           return func(value, boolmask->Value(i), keep_going);
+         });
+   } else if (PySequence_Check(mo)) {
+     if (PySequence_Size(mo) != PySequence_Size(obj)) {
+       return Status::Invalid("Mask was a different length from sequence being converted");
+     }
+     RETURN_IF_PYERROR();
+
+     return VisitSequenceGeneric(
+         obj, offset, [&func, &mo](PyObject* value, int64_t i, bool* keep_going) {
+           OwnedRef value_ref(PySequence_ITEM(mo, i));
+           if (!PyBool_Check(value_ref.obj()))
+             return Status::TypeError("Mask must be a sequence of booleans");
+           return func(value, value_ref.obj() == Py_True, keep_going);
+         });
+   } else {
+     return Status::Invalid("Null mask must be a NumPy array, Arrow array or a Sequence");
+   }
+
+   return Status::OK();
+ }
+
+ // Like IterateSequence, but accepts any generic iterable (including
+ // non-restartable iterators, e.g. generators).
+ //
+ // The call signature for VisitorFunc must be Visit(PyObject*, bool*
+ // keep_going). If keep_going is set to false, the iteration terminates
+ template <class VisitorFunc>
+ inline Status VisitIterable(PyObject* obj, VisitorFunc&& func) {
+   if (PySequence_Check(obj)) {
+     // Numpy arrays fall here as well
+     return VisitSequence(obj, /*offset=*/0, std::forward<VisitorFunc>(func));
+   }
+   // Fall back on the iterator protocol
+   OwnedRef iter_ref(PyObject_GetIter(obj));
+   PyObject* iter = iter_ref.obj();
+   RETURN_IF_PYERROR();
+   PyObject* value;
+
+   bool keep_going = true;
+   while (keep_going && (value = PyIter_Next(iter))) {
+     OwnedRef value_ref(value);
+     RETURN_NOT_OK(func(value_ref.obj(), &keep_going));
+   }
+   RETURN_IF_PYERROR();  // __next__() might have raised
+   return Status::OK();
+ }
+
+ }  // namespace internal
+ }  // namespace py
+ }  // namespace arrow
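Editor's note: these visitors are internal C++ helpers, but the masked path is, as far as I can tell, what `pyarrow.array(..., mask=...)` exercises when converting a Python sequence with an explicit validity mask (True marks a null). A small user-level sketch of that behaviour:

import numpy as np
import pyarrow as pa

values = ["a", "b", "c", "d"]
# Boolean NumPy mask: True marks the corresponding element as null.
mask = np.array([False, True, False, True])

arr = pa.array(values, mask=mask)
assert arr.null_count == 2
assert arr.to_pylist() == ["a", None, "c", None]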