Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes.
- env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.py +466 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx +170 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pyx +1631 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pyx +310 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pyx +445 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pyx +2195 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.pyx +458 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/flight.py +69 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/api.h +47 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h +587 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h +104 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h +275 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h +145 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/config.h +98 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h +22 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h +36 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h +78 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h +64 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h +82 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h +55 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h +220 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h +228 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h +112 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h +55 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h +28 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h +89 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h +311 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h +119 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/extension_type.h +165 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool_test.h +110 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/pch.h +30 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/pretty_print.h +157 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/api.h +30 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h +60 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h +36 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/common.h +458 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h +42 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/datetime.h +231 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h +106 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/filesystem.h +126 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/flight.h +350 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h +29 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/helpers.h +159 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h +64 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/io.h +121 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/ipc.h +52 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/iterators.h +194 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h +83 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h +201 -0
env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.py
ADDED
@@ -0,0 +1,466 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# flake8: noqa

"""
PyArrow is the python implementation of Apache Arrow.

Apache Arrow is a cross-language development platform for in-memory data.
It specifies a standardized language-independent columnar memory format for
flat and hierarchical data, organized for efficient analytic operations on
modern hardware. It also provides computational libraries and zero-copy
streaming messaging and interprocess communication.

For more information see the official page at https://arrow.apache.org
"""

import gc as _gc
import importlib as _importlib
import os as _os
import platform as _platform
import sys as _sys
import warnings as _warnings

try:
    from ._generated_version import version as __version__
except ImportError:
    # Package is not installed, parse git tag at runtime
    try:
        import setuptools_scm
        # Code duplicated from setup.py to avoid a dependency on each other

        def parse_git(root, **kwargs):
            """
            Parse function for setuptools_scm that ignores tags for non-C++
            subprojects, e.g. apache-arrow-js-XXX tags.
            """
            from setuptools_scm.git import parse
            kwargs['describe_command'] = \
                "git describe --dirty --tags --long --match 'apache-arrow-[0-9]*.*'"
            return parse(root, **kwargs)
        __version__ = setuptools_scm.get_version('../',
                                                 parse=parse_git)
    except ImportError:
        __version__ = None

# ARROW-8684: Disable GC while initializing Cython extension module,
# to workaround Cython bug in https://github.com/cython/cython/issues/3603
_gc_enabled = _gc.isenabled()
_gc.disable()
import pyarrow.lib as _lib
if _gc_enabled:
    _gc.enable()

from pyarrow.lib import (BuildInfo, RuntimeInfo, set_timezone_db_path,
                         MonthDayNano, VersionInfo, cpp_build_info,
                         cpp_version, cpp_version_info, runtime_info,
                         cpu_count, set_cpu_count, enable_signal_handlers,
                         io_thread_count, set_io_thread_count)


def show_versions():
    """
    Print various version information, to help with error reporting.
    """
    def print_entry(label, value):
        print(f"{label: <26}: {value: <8}")

    print("pyarrow version info\n--------------------")
    print_entry("Package kind", cpp_build_info.package_kind
                if len(cpp_build_info.package_kind) > 0
                else "not indicated")
    print_entry("Arrow C++ library version", cpp_build_info.version)
    print_entry("Arrow C++ compiler",
                f"{cpp_build_info.compiler_id} {cpp_build_info.compiler_version}")
    print_entry("Arrow C++ compiler flags", cpp_build_info.compiler_flags)
    print_entry("Arrow C++ git revision", cpp_build_info.git_id)
    print_entry("Arrow C++ git description", cpp_build_info.git_description)
    print_entry("Arrow C++ build type", cpp_build_info.build_type)


def _module_is_available(module):
    try:
        _importlib.import_module(f'pyarrow.{module}')
    except ImportError:
        return False
    else:
        return True


def _filesystem_is_available(fs):
    try:
        import pyarrow.fs
    except ImportError:
        return False

    try:
        getattr(pyarrow.fs, fs)
    except (ImportError, AttributeError):
        return False
    else:
        return True


def show_info():
    """
    Print detailed version and platform information, for error reporting
    """
    show_versions()

    def print_entry(label, value):
        print(f"  {label: <20}: {value: <8}")

    print("\nPlatform:")
    print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}")
    print_entry("SIMD Level", runtime_info().simd_level)
    print_entry("Detected SIMD Level", runtime_info().detected_simd_level)

    pool = default_memory_pool()
    print("\nMemory:")
    print_entry("Default backend", pool.backend_name)
    print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes")
    print_entry("Max memory", f"{pool.max_memory()} bytes")
    print_entry("Supported Backends", ', '.join(supported_memory_backends()))

    print("\nOptional modules:")
    modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json",
               "orc", "parquet"]
    for module in modules:
        status = "Enabled" if _module_is_available(module) else "-"
        print(f"  {module: <20}: {status: <8}")

    print("\nFilesystems:")
    filesystems = ["GcsFileSystem", "HadoopFileSystem", "S3FileSystem"]
    for fs in filesystems:
        status = "Enabled" if _filesystem_is_available(fs) else "-"
        print(f"  {fs: <20}: {status: <8}")

    print("\nCompression Codecs:")
    codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"]
    for codec in codecs:
        status = "Enabled" if Codec.is_available(codec) else "-"
        print(f"  {codec: <20}: {status: <8}")


from pyarrow.lib import (null, bool_,
                         int8, int16, int32, int64,
                         uint8, uint16, uint32, uint64,
                         time32, time64, timestamp, date32, date64, duration,
                         month_day_nano_interval,
                         float16, float32, float64,
                         binary, string, utf8,
                         large_binary, large_string, large_utf8,
                         decimal128, decimal256,
                         list_, large_list, map_, struct,
                         union, sparse_union, dense_union,
                         dictionary,
                         run_end_encoded,
                         fixed_shape_tensor,
                         field,
                         type_for_alias,
                         DataType, DictionaryType, StructType,
                         ListType, LargeListType, MapType, FixedSizeListType,
                         UnionType, SparseUnionType, DenseUnionType,
                         TimestampType, Time32Type, Time64Type, DurationType,
                         FixedSizeBinaryType, Decimal128Type, Decimal256Type,
                         BaseExtensionType, ExtensionType,
                         RunEndEncodedType, FixedShapeTensorType,
                         PyExtensionType, UnknownExtensionType,
                         register_extension_type, unregister_extension_type,
                         DictionaryMemo,
                         KeyValueMetadata,
                         Field,
                         Schema,
                         schema,
                         unify_schemas,
                         Array, Tensor,
                         array, chunked_array, record_batch, nulls, repeat,
                         SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix,
                         SparseCSFTensor,
                         infer_type, from_numpy_dtype,
                         NullArray,
                         NumericArray, IntegerArray, FloatingPointArray,
                         BooleanArray,
                         Int8Array, UInt8Array,
                         Int16Array, UInt16Array,
                         Int32Array, UInt32Array,
                         Int64Array, UInt64Array,
                         HalfFloatArray, FloatArray, DoubleArray,
                         ListArray, LargeListArray, MapArray,
                         FixedSizeListArray, UnionArray,
                         BinaryArray, StringArray,
                         LargeBinaryArray, LargeStringArray,
                         FixedSizeBinaryArray,
                         DictionaryArray,
                         Date32Array, Date64Array, TimestampArray,
                         Time32Array, Time64Array, DurationArray,
                         MonthDayNanoIntervalArray,
                         Decimal128Array, Decimal256Array, StructArray, ExtensionArray,
                         RunEndEncodedArray, FixedShapeTensorArray,
                         scalar, NA, _NULL as NULL, Scalar,
                         NullScalar, BooleanScalar,
                         Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar,
                         UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar,
                         HalfFloatScalar, FloatScalar, DoubleScalar,
                         Decimal128Scalar, Decimal256Scalar,
                         ListScalar, LargeListScalar, FixedSizeListScalar,
                         Date32Scalar, Date64Scalar,
                         Time32Scalar, Time64Scalar,
                         TimestampScalar, DurationScalar,
                         MonthDayNanoIntervalScalar,
                         BinaryScalar, LargeBinaryScalar,
                         StringScalar, LargeStringScalar,
                         FixedSizeBinaryScalar, DictionaryScalar,
                         MapScalar, StructScalar, UnionScalar,
                         RunEndEncodedScalar, ExtensionScalar)

# Buffers, allocation
from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
                         Codec, compress, decompress, allocate_buffer)

from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool,
                         total_allocated_bytes, set_memory_pool,
                         default_memory_pool, system_memory_pool,
                         jemalloc_memory_pool, mimalloc_memory_pool,
                         logging_memory_pool, proxy_memory_pool,
                         log_memory_allocations, jemalloc_set_decay_ms,
                         supported_memory_backends)

# I/O
from pyarrow.lib import (NativeFile, PythonFile,
                         BufferedInputStream, BufferedOutputStream, CacheOptions,
                         CompressedInputStream, CompressedOutputStream,
                         TransformInputStream, transcoding_input_stream,
                         FixedSizeBufferWriter,
                         BufferReader, BufferOutputStream,
                         OSFile, MemoryMappedFile, memory_map,
                         create_memory_map, MockOutputStream,
                         input_stream, output_stream)

from pyarrow._hdfsio import HdfsFile, have_libhdfs

from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table,
                         concat_arrays, concat_tables, TableGroupBy,
                         RecordBatchReader)

# Exceptions
from pyarrow.lib import (ArrowCancelled,
                         ArrowCapacityError,
                         ArrowException,
                         ArrowKeyError,
                         ArrowIndexError,
                         ArrowInvalid,
                         ArrowIOError,
                         ArrowMemoryError,
                         ArrowNotImplementedError,
                         ArrowTypeError,
                         ArrowSerializationError)

import pyarrow.hdfs as hdfs

from pyarrow.ipc import serialize_pandas, deserialize_pandas
import pyarrow.ipc as ipc

import pyarrow.types as types


# deprecated top-level access


from pyarrow.filesystem import FileSystem as _FileSystem
from pyarrow.filesystem import LocalFileSystem as _LocalFileSystem
from pyarrow.hdfs import HadoopFileSystem as _HadoopFileSystem


_localfs = _LocalFileSystem._get_instance()


_msg = (
    "pyarrow.{0} is deprecated as of 2.0.0, please use pyarrow.fs.{1} instead."
)

_serialization_msg = (
    "'pyarrow.{0}' is deprecated and will be removed in a future version. "
    "Use pickle or the pyarrow IPC functionality instead."
)

_deprecated = {
    "localfs": (_localfs, "LocalFileSystem"),
    "FileSystem": (_FileSystem, "FileSystem"),
    "LocalFileSystem": (_LocalFileSystem, "LocalFileSystem"),
    "HadoopFileSystem": (_HadoopFileSystem, "HadoopFileSystem"),
}


def __getattr__(name):
    if name in _deprecated:
        obj, new_name = _deprecated[name]
        _warnings.warn(_msg.format(name, new_name),
                       FutureWarning, stacklevel=2)
        return obj

    raise AttributeError(
        "module 'pyarrow' has no attribute '{0}'".format(name)
    )


# ----------------------------------------------------------------------
# Deprecations

from pyarrow.util import _deprecate_api, _deprecate_class


# TODO: Deprecate these somehow in the pyarrow namespace
from pyarrow.ipc import (Message, MessageReader, MetadataVersion,
                         RecordBatchFileReader, RecordBatchFileWriter,
                         RecordBatchStreamReader, RecordBatchStreamWriter)

# ----------------------------------------------------------------------
# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
# wheels)


def get_include():
    """
    Return absolute path to directory containing Arrow C++ include
    headers. Similar to numpy.get_include
    """
    return _os.path.join(_os.path.dirname(__file__), 'include')


def _get_pkg_config_executable():
    return _os.environ.get('PKG_CONFIG', 'pkg-config')


def _has_pkg_config(pkgname):
    import subprocess
    try:
        return subprocess.call([_get_pkg_config_executable(),
                                '--exists', pkgname]) == 0
    except FileNotFoundError:
        return False


def _read_pkg_config_variable(pkgname, cli_args):
    import subprocess
    cmd = [_get_pkg_config_executable(), pkgname] + cli_args
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError("pkg-config failed: " + err.decode('utf8'))
    return out.rstrip().decode('utf8')


def get_libraries():
    """
    Return list of library names to include in the `libraries` argument for C
    or Cython extensions using pyarrow
    """
    return ['arrow_python', 'arrow']


def create_library_symlinks():
    """
    With Linux and macOS wheels, the bundled shared libraries have an embedded
    ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them
    with -larrow won't work unless we create symlinks at locations like
    site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses
    prior problems we had with shipping two copies of the shared libraries to
    permit third party projects like turbodbc to build their C++ extensions
    against the pyarrow wheels.

    This function must only be invoked once and only when the shared libraries
    are bundled with the Python package, which should only apply to wheel-based
    installs. It requires write access to the site-packages/pyarrow directory
    and so depending on your system may need to be run with root.
    """
    import glob
    if _sys.platform == 'win32':
        return
    package_cwd = _os.path.dirname(__file__)

    if _sys.platform == 'linux':
        bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*'))

        def get_symlink_path(hard_path):
            return hard_path.rsplit('.', 1)[0]
    else:
        bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib'))

        def get_symlink_path(hard_path):
            return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib'))

    for lib_hard_path in bundled_libs:
        symlink_path = get_symlink_path(lib_hard_path)
        if _os.path.exists(symlink_path):
            continue
        try:
            _os.symlink(lib_hard_path, symlink_path)
        except PermissionError:
            print("Tried creating symlink {}. If you need to link to "
                  "bundled shared libraries, run "
                  "pyarrow.create_library_symlinks() as root")


def get_library_dirs():
    """
    Return lists of directories likely to contain Arrow C++ libraries for
    linking C or Cython extensions using pyarrow
    """
    package_cwd = _os.path.dirname(__file__)
    library_dirs = [package_cwd]

    def append_library_dir(library_dir):
        if library_dir not in library_dirs:
            library_dirs.append(library_dir)

    # Search library paths via pkg-config. This is necessary if the user
    # installed libarrow and the other shared libraries manually and they
    # are not shipped inside the pyarrow package (see also ARROW-2976).
    pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config'
    for pkgname in ["arrow", "arrow_python"]:
        if _has_pkg_config(pkgname):
            library_dir = _read_pkg_config_variable(pkgname,
                                                    ["--libs-only-L"])
            # pkg-config output could be empty if Arrow is installed
            # as a system package.
            if library_dir:
                if not library_dir.startswith("-L"):
                    raise ValueError(
                        "pkg-config --libs-only-L returned unexpected "
                        "value {!r}".format(library_dir))
                append_library_dir(library_dir[2:])

    if _sys.platform == 'win32':
        # TODO(wesm): Is this necessary, or does setuptools within a conda
        # installation add Library\lib to the linker path for MSVC?
        python_base_install = _os.path.dirname(_sys.executable)
        library_dir = _os.path.join(python_base_install, 'Library', 'lib')

        if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')):
            append_library_dir(library_dir)

    # ARROW-4074: Allow for ARROW_HOME to be set to some other directory
    if _os.environ.get('ARROW_HOME'):
        append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib'))
    else:
        # Python wheels bundle the Arrow libraries in the pyarrow directory.
        append_library_dir(_os.path.dirname(_os.path.abspath(__file__)))

    return library_dirs
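The deprecation shim in this file relies on PEP 562 module-level __getattr__: attribute lookups that fail normally fall through to the function, which can warn and return the legacy object. A minimal standalone sketch of the same pattern follows; the module name "mymodule" and the attribute names are hypothetical, not part of pyarrow.

# mymodule.py -- minimal sketch of the PEP 562 deprecation shim used above.
import warnings

new_object = object()  # the supported replacement

_deprecated = {
    # hypothetical legacy name -> (object to return, suggested new name)
    "old_name": (new_object, "new_object"),
}


def __getattr__(name):
    # Invoked only when normal module attribute lookup fails (PEP 562).
    if name in _deprecated:
        obj, new_name = _deprecated[name]
        warnings.warn(f"mymodule.{name} is deprecated, use "
                      f"mymodule.{new_name} instead",
                      FutureWarning, stacklevel=2)
        return obj
    raise AttributeError(f"module 'mymodule' has no attribute {name!r}")

Importing mymodule and touching mymodule.old_name emits the FutureWarning while still returning the object, which is how pyarrow keeps names like pyarrow.LocalFileSystem working during the migration to pyarrow.fs.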
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx
ADDED
@@ -0,0 +1,170 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: language_level = 3

"""Dataset support for Parquet encryption."""

from pyarrow.includes.libarrow_dataset_parquet cimport *
from pyarrow._parquet_encryption cimport *
from pyarrow._dataset_parquet cimport ParquetFragmentScanOptions, ParquetFileWriteOptions


cdef class ParquetEncryptionConfig(_Weakrefable):
    """
    Core configuration class encapsulating parameters for high-level encryption
    within the Parquet framework.

    The ParquetEncryptionConfig class serves as a bridge for passing encryption-related
    parameters to the appropriate components within the Parquet library. It maintains references
    to objects that define the encryption strategy, Key Management Service (KMS) configuration,
    and specific encryption configurations for Parquet data.

    Parameters
    ----------
    crypto_factory : pyarrow.parquet.encryption.CryptoFactory
        Shared pointer to a `CryptoFactory` object. The `CryptoFactory` is responsible for
        creating cryptographic components, such as encryptors and decryptors.
    kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
        Shared pointer to a `KmsConnectionConfig` object. This object holds the configuration
        parameters necessary for connecting to a Key Management Service (KMS).
    encryption_config : pyarrow.parquet.encryption.EncryptionConfiguration
        Shared pointer to an `EncryptionConfiguration` object. This object defines specific
        encryption settings for Parquet data, including the keys assigned to different columns.

    Raises
    ------
    ValueError
        Raised if `encryption_config` is None.
    """
    cdef:
        shared_ptr[CParquetEncryptionConfig] c_config

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
                  EncryptionConfiguration encryption_config):

        cdef shared_ptr[CEncryptionConfiguration] c_encryption_config

        if crypto_factory is None:
            raise ValueError("crypto_factory cannot be None")

        if kms_connection_config is None:
            raise ValueError("kms_connection_config cannot be None")

        if encryption_config is None:
            raise ValueError("encryption_config cannot be None")

        self.c_config.reset(new CParquetEncryptionConfig())

        c_encryption_config = pyarrow_unwrap_encryptionconfig(
            encryption_config)

        self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
        self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
            kms_connection_config)
        self.c_config.get().encryption_config = c_encryption_config

    @staticmethod
    cdef wrap(shared_ptr[CParquetEncryptionConfig] c_config):
        cdef ParquetEncryptionConfig python_config = ParquetEncryptionConfig.__new__(ParquetEncryptionConfig)
        python_config.c_config = c_config
        return python_config

    cdef shared_ptr[CParquetEncryptionConfig] unwrap(self):
        return self.c_config


cdef class ParquetDecryptionConfig(_Weakrefable):
    """
    Core configuration class encapsulating parameters for high-level decryption
    within the Parquet framework.

    ParquetDecryptionConfig is designed to pass decryption-related parameters to
    the appropriate decryption components within the Parquet library. It holds references to
    objects that define the decryption strategy, Key Management Service (KMS) configuration,
    and specific decryption configurations for reading encrypted Parquet data.

    Parameters
    ----------
    crypto_factory : pyarrow.parquet.encryption.CryptoFactory
        Shared pointer to a `CryptoFactory` object, pivotal in creating cryptographic
        components for the decryption process.
    kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
        Shared pointer to a `KmsConnectionConfig` object, containing parameters necessary
        for connecting to a Key Management Service (KMS) during decryption.
    decryption_config : pyarrow.parquet.encryption.DecryptionConfiguration
        Shared pointer to a `DecryptionConfiguration` object, specifying decryption settings
        for reading encrypted Parquet data.

    Raises
    ------
    ValueError
        Raised if `decryption_config` is None.
    """

    cdef:
        shared_ptr[CParquetDecryptionConfig] c_config

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
                  DecryptionConfiguration decryption_config):

        cdef shared_ptr[CDecryptionConfiguration] c_decryption_config

        if decryption_config is None:
            raise ValueError(
                "decryption_config cannot be None")

        self.c_config.reset(new CParquetDecryptionConfig())

        c_decryption_config = pyarrow_unwrap_decryptionconfig(
            decryption_config)

        self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
        self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
            kms_connection_config)
        self.c_config.get().decryption_config = c_decryption_config

    @staticmethod
    cdef wrap(shared_ptr[CParquetDecryptionConfig] c_config):
        cdef ParquetDecryptionConfig python_config = ParquetDecryptionConfig.__new__(ParquetDecryptionConfig)
        python_config.c_config = c_config
        return python_config

    cdef shared_ptr[CParquetDecryptionConfig] unwrap(self):
        return self.c_config


def set_encryption_config(
        ParquetFileWriteOptions opts not None,
        ParquetEncryptionConfig config not None
):
    cdef shared_ptr[CParquetEncryptionConfig] c_config = config.unwrap()
    opts.parquet_options.parquet_encryption_config = c_config


def set_decryption_config(
        ParquetFragmentScanOptions opts not None,
        ParquetDecryptionConfig config not None
):
    cdef shared_ptr[CParquetDecryptionConfig] c_config = config.unwrap()
    opts.parquet_options.parquet_decryption_config = c_config
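For context, a hedged sketch of how these hooks are typically reached from Python. It assumes the wrappers pyarrow.dataset.ParquetEncryptionConfig and ParquetFragmentScanOptions(decryption_config=...) behave as in Arrow's documented dataset-encryption example; the in-memory KMS client is a toy stand-in, not a real key service, and the key names are made up.

# Hedged sketch: wiring a ParquetEncryptionConfig into dataset write options.
import base64

import pyarrow.dataset as ds
import pyarrow.parquet.encryption as pe


class InMemoryKmsClient(pe.KmsClient):
    """Toy KMS client: 'wraps' a key by base64-encoding it in place."""

    def wrap_key(self, key_bytes, master_key_identifier):
        return base64.b64encode(key_bytes).decode("ascii")

    def unwrap_key(self, wrapped_key, master_key_identifier):
        return base64.b64decode(wrapped_key)


kms_config = pe.KmsConnectionConfig()
crypto_factory = pe.CryptoFactory(lambda config: InMemoryKmsClient())
encryption_config = pe.EncryptionConfiguration(
    footer_key="footer_key",                 # hypothetical key names
    column_keys={"col_key": ["a", "b"]})

# Bundle the three objects checked for None in __cinit__ above.
dataset_encryption = ds.ParquetEncryptionConfig(
    crypto_factory, kms_config, encryption_config)

# set_encryption_config() is reached through the format's write options:
file_format = ds.ParquetFileFormat()
write_options = file_format.make_write_options(
    encryption_config=dataset_encryption)

# Decryption mirrors it on the scan side via set_decryption_config():
dataset_decryption = ds.ParquetDecryptionConfig(
    crypto_factory, kms_config, pe.DecryptionConfiguration())
scan_options = ds.ParquetFragmentScanOptions(
    decryption_config=dataset_decryption)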
env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pyx
ADDED
@@ -0,0 +1,1631 @@
1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
# or more contributor license agreements. See the NOTICE file
|
3 |
+
# distributed with this work for additional information
|
4 |
+
# regarding copyright ownership. The ASF licenses this file
|
5 |
+
# to you under the Apache License, Version 2.0 (the
|
6 |
+
# "License"); you may not use this file except in compliance
|
7 |
+
# with the License. You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing,
|
12 |
+
# software distributed under the License is distributed on an
|
13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
# KIND, either express or implied. See the License for the
|
15 |
+
# specific language governing permissions and limitations
|
16 |
+
# under the License.
|
17 |
+
|
18 |
+
# cython: language_level = 3
|
19 |
+
|
20 |
+
from cpython.datetime cimport datetime, PyDateTime_DateTime
|
21 |
+
from cython cimport binding
|
22 |
+
|
23 |
+
from pyarrow.includes.common cimport *
|
24 |
+
from pyarrow.includes.libarrow_python cimport PyDateTime_to_TimePoint
|
25 |
+
from pyarrow.lib import _detect_compression, frombytes, tobytes
|
26 |
+
from pyarrow.lib cimport *
|
27 |
+
from pyarrow.util import _stringify_path
|
28 |
+
|
29 |
+
from abc import ABC, abstractmethod
|
30 |
+
from datetime import datetime, timezone
|
31 |
+
import os
|
32 |
+
import pathlib
|
33 |
+
import sys
|
34 |
+
|
35 |
+
|
36 |
+
cdef _init_ca_paths():
|
37 |
+
cdef CFileSystemGlobalOptions options
|
38 |
+
|
39 |
+
import ssl
|
40 |
+
paths = ssl.get_default_verify_paths()
|
41 |
+
if paths.cafile:
|
42 |
+
options.tls_ca_file_path = os.fsencode(paths.cafile)
|
43 |
+
if paths.capath:
|
44 |
+
options.tls_ca_dir_path = os.fsencode(paths.capath)
|
45 |
+
check_status(CFileSystemsInitialize(options))
|
46 |
+
|
47 |
+
|
48 |
+
if sys.platform == 'linux':
|
49 |
+
# ARROW-9261: On Linux, we may need to fixup the paths to TLS CA certs
|
50 |
+
# (especially in manylinux packages) since the values hardcoded at
|
51 |
+
# compile-time in libcurl may be wrong.
|
52 |
+
_init_ca_paths()
|
53 |
+
|
54 |
+
|
55 |
+
cdef inline c_string _path_as_bytes(path) except *:
|
56 |
+
# handle only abstract paths, not bound to any filesystem like pathlib is,
|
57 |
+
# so we only accept plain strings
|
58 |
+
if not isinstance(path, (bytes, str)):
|
59 |
+
raise TypeError('Path must be a string')
|
60 |
+
# tobytes always uses utf-8, which is more or less ok, at least on Windows
|
61 |
+
# since the C++ side then decodes from utf-8. On Unix, os.fsencode may be
|
62 |
+
# better.
|
63 |
+
return tobytes(path)
|
64 |
+
|
65 |
+
|
66 |
+
cdef object _wrap_file_type(CFileType ty):
|
67 |
+
return FileType(<int8_t> ty)
|
68 |
+
|
69 |
+
|
70 |
+
cdef CFileType _unwrap_file_type(FileType ty) except *:
|
71 |
+
if ty == FileType.Unknown:
|
72 |
+
return CFileType_Unknown
|
73 |
+
elif ty == FileType.NotFound:
|
74 |
+
return CFileType_NotFound
|
75 |
+
elif ty == FileType.File:
|
76 |
+
return CFileType_File
|
77 |
+
elif ty == FileType.Directory:
|
78 |
+
return CFileType_Directory
|
79 |
+
assert 0
|
80 |
+
|
81 |
+
|
82 |
+
def _file_type_to_string(ty):
|
83 |
+
# Python 3.11 changed str(IntEnum) to return the string representation
|
84 |
+
# of the integer value: https://github.com/python/cpython/issues/94763
|
85 |
+
return f"{ty.__class__.__name__}.{ty._name_}"
|
86 |
+
|
87 |
+
|
88 |
+
cdef class FileInfo(_Weakrefable):
|
89 |
+
"""
|
90 |
+
FileSystem entry info.
|
91 |
+
|
92 |
+
Parameters
|
93 |
+
----------
|
94 |
+
path : str
|
95 |
+
The full path to the filesystem entry.
|
96 |
+
type : FileType
|
97 |
+
The type of the filesystem entry.
|
98 |
+
mtime : datetime or float, default None
|
99 |
+
If given, the modification time of the filesystem entry.
|
100 |
+
If a float is given, it is the number of seconds since the
|
101 |
+
Unix epoch.
|
102 |
+
mtime_ns : int, default None
|
103 |
+
If given, the modification time of the filesystem entry,
|
104 |
+
in nanoseconds since the Unix epoch.
|
105 |
+
`mtime` and `mtime_ns` are mutually exclusive.
|
106 |
+
size : int, default None
|
107 |
+
If given, the filesystem entry size in bytes. This should only
|
108 |
+
be given if `type` is `FileType.File`.
|
109 |
+
|
110 |
+
Examples
|
111 |
+
--------
|
112 |
+
Generate a file:
|
113 |
+
|
114 |
+
>>> from pyarrow import fs
|
115 |
+
>>> local = fs.LocalFileSystem()
|
116 |
+
>>> path_fs = local_path + '/pyarrow-fs-example.dat'
|
117 |
+
>>> with local.open_output_stream(path_fs) as stream:
|
118 |
+
... stream.write(b'data')
|
119 |
+
4
|
120 |
+
|
121 |
+
Get FileInfo object using ``get_file_info()``:
|
122 |
+
|
123 |
+
>>> file_info = local.get_file_info(path_fs)
|
124 |
+
>>> file_info
|
125 |
+
<FileInfo for '.../pyarrow-fs-example.dat': type=FileType.File, size=4>
|
126 |
+
|
127 |
+
Inspect FileInfo attributes:
|
128 |
+
|
129 |
+
>>> file_info.type
|
130 |
+
<FileType.File: 2>
|
131 |
+
|
132 |
+
>>> file_info.is_file
|
133 |
+
True
|
134 |
+
|
135 |
+
>>> file_info.path
|
136 |
+
'/.../pyarrow-fs-example.dat'
|
137 |
+
|
138 |
+
>>> file_info.base_name
|
139 |
+
'pyarrow-fs-example.dat'
|
140 |
+
|
141 |
+
>>> file_info.size
|
142 |
+
4
|
143 |
+
|
144 |
+
>>> file_info.extension
|
145 |
+
'dat'
|
146 |
+
|
147 |
+
>>> file_info.mtime # doctest: +SKIP
|
148 |
+
datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)
|
149 |
+
|
150 |
+
>>> file_info.mtime_ns # doctest: +SKIP
|
151 |
+
1656489370873922073
|
152 |
+
"""
|
153 |
+
|
154 |
+
def __init__(self, path, FileType type=FileType.Unknown, *,
|
155 |
+
mtime=None, mtime_ns=None, size=None):
|
156 |
+
self.info.set_path(tobytes(path))
|
157 |
+
self.info.set_type(_unwrap_file_type(type))
|
158 |
+
if mtime is not None:
|
159 |
+
if mtime_ns is not None:
|
160 |
+
raise TypeError("Only one of mtime and mtime_ns "
|
161 |
+
"can be given")
|
162 |
+
if isinstance(mtime, datetime):
|
163 |
+
self.info.set_mtime(PyDateTime_to_TimePoint(
|
164 |
+
<PyDateTime_DateTime*> mtime))
|
165 |
+
else:
|
166 |
+
self.info.set_mtime(TimePoint_from_s(mtime))
|
167 |
+
elif mtime_ns is not None:
|
168 |
+
self.info.set_mtime(TimePoint_from_ns(mtime_ns))
|
169 |
+
if size is not None:
|
170 |
+
self.info.set_size(size)
|
171 |
+
|
172 |
+
@staticmethod
|
173 |
+
cdef wrap(CFileInfo info):
|
174 |
+
cdef FileInfo self = FileInfo.__new__(FileInfo)
|
175 |
+
self.info = move(info)
|
176 |
+
return self
|
177 |
+
|
178 |
+
cdef inline CFileInfo unwrap(self) nogil:
|
179 |
+
return self.info
|
180 |
+
|
181 |
+
@staticmethod
|
182 |
+
cdef CFileInfo unwrap_safe(obj):
|
183 |
+
if not isinstance(obj, FileInfo):
|
184 |
+
raise TypeError("Expected FileInfo instance, got {0}"
|
185 |
+
.format(type(obj)))
|
186 |
+
return (<FileInfo> obj).unwrap()
|
187 |
+
|
188 |
+
def __repr__(self):
|
189 |
+
def getvalue(attr):
|
190 |
+
try:
|
191 |
+
return getattr(self, attr)
|
192 |
+
except ValueError:
|
193 |
+
return ''
|
194 |
+
|
195 |
+
s = (f'<FileInfo for {self.path!r}: '
|
196 |
+
f'type={_file_type_to_string(self.type)}')
|
197 |
+
if self.is_file:
|
198 |
+
s += f', size={self.size}'
|
199 |
+
s += '>'
|
200 |
+
return s
|
201 |
+
|
202 |
+
@property
|
203 |
+
def type(self):
|
204 |
+
"""
|
205 |
+
Type of the file.
|
206 |
+
|
207 |
+
The returned enum values can be the following:
|
208 |
+
|
209 |
+
- FileType.NotFound: target does not exist
|
210 |
+
- FileType.Unknown: target exists but its type is unknown (could be a
|
211 |
+
special file such as a Unix socket or character device, or
|
212 |
+
Windows NUL / CON / ...)
|
213 |
+
- FileType.File: target is a regular file
|
214 |
+
- FileType.Directory: target is a regular directory
|
215 |
+
|
216 |
+
Returns
|
217 |
+
-------
|
218 |
+
type : FileType
|
219 |
+
"""
|
220 |
+
return _wrap_file_type(self.info.type())
|
221 |
+
|
222 |
+
@property
|
223 |
+
def is_file(self):
|
224 |
+
"""
|
225 |
+
"""
|
226 |
+
return self.type == FileType.File
|
227 |
+
|
228 |
+
@property
|
229 |
+
def path(self):
|
230 |
+
"""
|
231 |
+
The full file path in the filesystem.
|
232 |
+
|
233 |
+
Examples
|
234 |
+
--------
|
235 |
+
>>> file_info = local.get_file_info(path)
|
236 |
+
>>> file_info.path
|
237 |
+
'/.../pyarrow-fs-example.dat'
|
238 |
+
"""
|
239 |
+
return frombytes(self.info.path())
|
240 |
+
|
241 |
+
@property
|
242 |
+
def base_name(self):
|
243 |
+
"""
|
244 |
+
The file base name.
|
245 |
+
|
246 |
+
Component after the last directory separator.
|
247 |
+
|
248 |
+
Examples
|
249 |
+
--------
|
250 |
+
>>> file_info = local.get_file_info(path)
|
251 |
+
>>> file_info.base_name
|
252 |
+
'pyarrow-fs-example.dat'
|
253 |
+
"""
|
254 |
+
return frombytes(self.info.base_name())
|
255 |
+
|
256 |
+
@property
|
257 |
+
def size(self):
|
258 |
+
"""
|
259 |
+
The size in bytes, if available.
|
260 |
+
|
261 |
+
Only regular files are guaranteed to have a size.
|
262 |
+
|
263 |
+
Returns
|
264 |
+
-------
|
265 |
+
size : int or None
|
266 |
+
"""
|
267 |
+
cdef int64_t size
|
268 |
+
size = self.info.size()
|
269 |
+
return (size if size != -1 else None)
|
270 |
+
|
271 |
+
@property
|
272 |
+
def extension(self):
|
273 |
+
"""
|
274 |
+
The file extension.
|
275 |
+
|
276 |
+
Examples
|
277 |
+
--------
|
278 |
+
>>> file_info = local.get_file_info(path)
|
279 |
+
>>> file_info.extension
|
280 |
+
'dat'
|
281 |
+
"""
|
282 |
+
return frombytes(self.info.extension())
|
283 |
+
|
284 |
+
@property
|
285 |
+
def mtime(self):
|
286 |
+
"""
|
287 |
+
The time of last modification, if available.
|
288 |
+
|
289 |
+
Returns
|
290 |
+
-------
|
291 |
+
mtime : datetime.datetime or None
|
292 |
+
|
293 |
+
Examples
|
294 |
+
--------
|
295 |
+
>>> file_info = local.get_file_info(path)
|
296 |
+
>>> file_info.mtime # doctest: +SKIP
|
297 |
+
datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)
|
298 |
+
"""
|
299 |
+
cdef int64_t nanoseconds
|
300 |
+
nanoseconds = TimePoint_to_ns(self.info.mtime())
|
301 |
+
return (datetime.fromtimestamp(nanoseconds / 1.0e9, timezone.utc)
|
302 |
+
if nanoseconds != -1 else None)
|
303 |
+
|
304 |
+
@property
|
305 |
+
def mtime_ns(self):
|
306 |
+
"""
|
307 |
+
The time of last modification, if available, expressed in nanoseconds
|
308 |
+
since the Unix epoch.
|
309 |
+
|
310 |
+
Returns
|
311 |
+
-------
|
312 |
+
mtime_ns : int or None
|
313 |
+
|
314 |
+
Examples
|
315 |
+
--------
|
316 |
+
>>> file_info = local.get_file_info(path)
|
317 |
+
>>> file_info.mtime_ns # doctest: +SKIP
|
318 |
+
1656489370873922073
|
319 |
+
"""
|
320 |
+
cdef int64_t nanoseconds
|
321 |
+
nanoseconds = TimePoint_to_ns(self.info.mtime())
|
322 |
+
return (nanoseconds if nanoseconds != -1 else None)
|
323 |
+
|
324 |
+
|
325 |
+
cdef class FileSelector(_Weakrefable):
|
326 |
+
"""
|
327 |
+
File and directory selector.
|
328 |
+
|
329 |
+
It contains a set of options that describes how to search for files and
|
330 |
+
directories.
|
331 |
+
|
332 |
+
Parameters
|
333 |
+
----------
|
334 |
+
base_dir : str
|
335 |
+
The directory in which to select files. Relative paths also work, use
|
336 |
+
'.' for the current directory and '..' for the parent.
|
337 |
+
allow_not_found : bool, default False
|
338 |
+
The behavior if `base_dir` doesn't exist in the filesystem.
|
339 |
+
If false, an error is returned.
|
340 |
+
If true, an empty selection is returned.
|
341 |
+
recursive : bool, default False
|
342 |
+
Whether to recurse into subdirectories.
|
343 |
+
|
344 |
+
Examples
|
345 |
+
--------
|
346 |
+
List the contents of a directory and subdirectories:
|
347 |
+
|
348 |
+
>>> selector_1 = fs.FileSelector(local_path, recursive=True)
|
349 |
+
>>> local.get_file_info(selector_1) # doctest: +SKIP
|
350 |
+
[<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
|
351 |
+
<FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>,
|
352 |
+
<FileInfo for 'tmp/alphabet/subdir/example_copy.dat': type=FileType.File, size=4>]
|
353 |
+
|
354 |
+
List only the contents of the base directory:
|
355 |
+
|
356 |
+
>>> selector_2 = fs.FileSelector(local_path)
|
357 |
+
>>> local.get_file_info(selector_2) # doctest: +SKIP
|
358 |
+
[<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
|
359 |
+
<FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>]
|
360 |
+
|
361 |
+
Return empty selection if the directory doesn't exist:
|
362 |
+
|
363 |
+
>>> selector_not_found = fs.FileSelector(local_path + '/missing',
|
364 |
+
... recursive=True,
|
365 |
+
... allow_not_found=True)
|
366 |
+
>>> local.get_file_info(selector_not_found)
|
367 |
+
[]
|
368 |
+
"""
|
369 |
+
|
370 |
+
def __init__(self, base_dir, bint allow_not_found=False,
|
371 |
+
bint recursive=False):
|
372 |
+
self.base_dir = base_dir
|
373 |
+
self.recursive = recursive
|
374 |
+
self.allow_not_found = allow_not_found
|
375 |
+
|
376 |
+
@staticmethod
|
377 |
+
cdef FileSelector wrap(CFileSelector wrapped):
|
378 |
+
cdef FileSelector self = FileSelector.__new__(FileSelector)
|
379 |
+
self.selector = move(wrapped)
|
380 |
+
return self
|
381 |
+
|
382 |
+
cdef inline CFileSelector unwrap(self) nogil:
|
383 |
+
return self.selector
|
384 |
+
|
385 |
+
@property
|
386 |
+
def base_dir(self):
|
387 |
+
return frombytes(self.selector.base_dir)
|
388 |
+
|
389 |
+
@base_dir.setter
|
390 |
+
def base_dir(self, base_dir):
|
391 |
+
self.selector.base_dir = _path_as_bytes(base_dir)
|
392 |
+
|
393 |
+
@property
|
394 |
+
def allow_not_found(self):
|
395 |
+
return self.selector.allow_not_found
|
396 |
+
|
397 |
+
@allow_not_found.setter
|
398 |
+
def allow_not_found(self, bint allow_not_found):
|
399 |
+
self.selector.allow_not_found = allow_not_found
|
400 |
+
|
401 |
+
@property
|
402 |
+
def recursive(self):
|
403 |
+
return self.selector.recursive
|
404 |
+
|
405 |
+
@recursive.setter
|
406 |
+
def recursive(self, bint recursive):
|
407 |
+
self.selector.recursive = recursive
|
408 |
+
|
409 |
+
def __repr__(self):
|
410 |
+
return ("<FileSelector base_dir={0.base_dir!r} "
|
411 |
+
"recursive={0.recursive}>".format(self))
|
412 |
+
|
413 |
+
|
cdef class FileSystem(_Weakrefable):
    """
    Abstract file system API.
    """

    def __init__(self):
        raise TypeError("FileSystem is an abstract class, instantiate one of "
                        "the subclasses instead: LocalFileSystem or "
                        "SubTreeFileSystem")

    @staticmethod
    def from_uri(uri):
        """
        Create a new FileSystem from URI or Path.

        Recognized URI schemes are "file", "mock", "s3", "gs", "gcs", "hdfs"
        and "viewfs". In addition, the argument can be a pathlib.Path object,
        or a string describing an absolute local path.

        Parameters
        ----------
        uri : string
            URI-based path, for example: file:///some/local/path.

        Returns
        -------
        tuple of (FileSystem, str path)
            With (filesystem, path) tuple where path is the abstract path
            inside the FileSystem instance.

        Examples
        --------
        Create a new FileSystem subclass from a URI:

        >>> uri = 'file:///{}/pyarrow-fs-example.dat'.format(local_path)
        >>> local_new, path_new = fs.FileSystem.from_uri(uri)
        >>> local_new
        <pyarrow._fs.LocalFileSystem object at ...
        >>> path_new
        '/.../pyarrow-fs-example.dat'

        Or from an S3 bucket:

        >>> fs.FileSystem.from_uri("s3://usgs-landsat/collection02/")
        (<pyarrow._s3fs.S3FileSystem object at ...>, 'usgs-landsat/collection02')
        """
        cdef:
            c_string c_path
            c_string c_uri
            CResult[shared_ptr[CFileSystem]] result

        if isinstance(uri, pathlib.Path):
            # Make absolute
            uri = uri.resolve().absolute()
        c_uri = tobytes(_stringify_path(uri))
        with nogil:
            result = CFileSystemFromUriOrPath(c_uri, &c_path)
        return FileSystem.wrap(GetResultValue(result)), frombytes(c_path)

    cdef init(self, const shared_ptr[CFileSystem]& wrapped):
        self.wrapped = wrapped
        self.fs = wrapped.get()

    @staticmethod
    cdef wrap(const shared_ptr[CFileSystem]& sp):
        cdef FileSystem self

        typ = frombytes(sp.get().type_name())
        if typ == 'local':
            self = LocalFileSystem.__new__(LocalFileSystem)
        elif typ == 'mock':
            self = _MockFileSystem.__new__(_MockFileSystem)
        elif typ == 'subtree':
            self = SubTreeFileSystem.__new__(SubTreeFileSystem)
        elif typ == 's3':
            from pyarrow._s3fs import S3FileSystem
            self = S3FileSystem.__new__(S3FileSystem)
        elif typ == 'gcs':
            from pyarrow._gcsfs import GcsFileSystem
            self = GcsFileSystem.__new__(GcsFileSystem)
        elif typ == 'hdfs':
            from pyarrow._hdfs import HadoopFileSystem
            self = HadoopFileSystem.__new__(HadoopFileSystem)
        elif typ.startswith('py::'):
            self = PyFileSystem.__new__(PyFileSystem)
        else:
            raise TypeError('Cannot wrap FileSystem pointer')

        self.init(sp)
        return self

    cdef inline shared_ptr[CFileSystem] unwrap(self) nogil:
        return self.wrapped

    def equals(self, FileSystem other not None):
        """
        Parameters
        ----------
        other : pyarrow.fs.FileSystem

        Returns
        -------
        bool
        """
        return self.fs.Equals(other.unwrap())

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    @property
    def type_name(self):
        """
        The filesystem's type name.
        """
        return frombytes(self.fs.type_name())

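    # A short sketch of from_uri() dispatch (the "mock" scheme maps to the
    # in-memory _MockFileSystem, so nothing external is assumed):
    #
    #   >>> mockfs, path = FileSystem.from_uri('mock:///some/dir')  # doctest: +SKIP
    #   >>> type(mockfs).__name__                                   # doctest: +SKIP
    #   '_MockFileSystem'
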
    def get_file_info(self, paths_or_selector):
        """
        Get info for the given files.

        Any symlink is automatically dereferenced, recursively. A non-existing
        or unreachable file returns a FileInfo object and has a FileType of
        value NotFound. An exception indicates a truly exceptional condition
        (low-level I/O error, etc.).

        Parameters
        ----------
        paths_or_selector : FileSelector, path-like or list of path-likes
            Either a selector object, a path-like object or a list of
            path-like objects. The selector's base directory will not be
            part of the results, even if it exists. If it doesn't exist,
            use `allow_not_found`.

        Returns
        -------
        FileInfo or list of FileInfo
            A single FileInfo object is returned for a single path, otherwise
            a list of FileInfo objects is returned.

        Examples
        --------
        >>> local
        <pyarrow._fs.LocalFileSystem object at ...>
        >>> local.get_file_info("/{}/pyarrow-fs-example.dat".format(local_path))
        <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
        """
        cdef:
            CFileInfo info
            c_string path
            vector[CFileInfo] infos
            vector[c_string] paths
            CFileSelector selector

        if isinstance(paths_or_selector, FileSelector):
            with nogil:
                selector = (<FileSelector>paths_or_selector).selector
                infos = GetResultValue(self.fs.GetFileInfo(selector))
        elif isinstance(paths_or_selector, (list, tuple)):
            paths = [_path_as_bytes(s) for s in paths_or_selector]
            with nogil:
                infos = GetResultValue(self.fs.GetFileInfo(paths))
        elif isinstance(paths_or_selector, (bytes, str)):
            path = _path_as_bytes(paths_or_selector)
            with nogil:
                info = GetResultValue(self.fs.GetFileInfo(path))
            return FileInfo.wrap(info)
        else:
            raise TypeError('Must pass either path(s) or a FileSelector')

        return [FileInfo.wrap(info) for info in infos]

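    # A companion sketch for get_file_info(): a FileSelector yields a list,
    # a single path yields one FileInfo ('local' and '/tmp' are assumptions):
    #
    #   >>> sel = FileSelector('/tmp', recursive=False)
    #   >>> infos = local.get_file_info(sel)      # list of FileInfo
    #   >>> info = local.get_file_info('/tmp')    # single FileInfo
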
    def create_dir(self, path, *, bint recursive=True):
        """
        Create a directory and subdirectories.

        This function succeeds if the directory already exists.

        Parameters
        ----------
        path : str
            The path of the new directory.
        recursive : bool, default True
            Create nested directories as well.
        """
        cdef c_string directory = _path_as_bytes(path)
        with nogil:
            check_status(self.fs.CreateDir(directory, recursive=recursive))

    def delete_dir(self, path):
        """
        Delete a directory and its contents, recursively.

        Parameters
        ----------
        path : str
            The path of the directory to be deleted.
        """
        cdef c_string directory = _path_as_bytes(path)
        with nogil:
            check_status(self.fs.DeleteDir(directory))

    def delete_dir_contents(self, path, *,
                            bint accept_root_dir=False,
                            bint missing_dir_ok=False):
        """
        Delete a directory's contents, recursively.

        Like delete_dir, but doesn't delete the directory itself.

        Parameters
        ----------
        path : str
            The path of the directory to be deleted.
        accept_root_dir : boolean, default False
            Allow deleting the root directory's contents
            (if path is empty or "/").
        missing_dir_ok : boolean, default False
            If False, an error is raised if the path does not exist.
        """
        cdef c_string directory = _path_as_bytes(path)
        if accept_root_dir and directory.strip(b"/") == b"":
            with nogil:
                check_status(self.fs.DeleteRootDirContents())
        else:
            with nogil:
                check_status(self.fs.DeleteDirContents(directory,
                                                       missing_dir_ok))

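    # The three directory helpers above compose as in this sketch (assumes a
    # writable /tmp; 'local' is a LocalFileSystem as in the docstrings):
    #
    #   >>> local.create_dir('/tmp/a/b')          # recursive by default
    #   >>> local.delete_dir_contents('/tmp/a')   # '/tmp/a' itself survives
    #   >>> local.delete_dir('/tmp/a')            # now remove it entirely
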
    def move(self, src, dest):
        """
        Move / rename a file or directory.

        If the destination exists:
        - if it is a non-empty directory, an error is returned
        - otherwise, if it has the same type as the source, it is replaced
        - otherwise, behavior is unspecified (implementation-dependent).

        Parameters
        ----------
        src : str
            The path of the file or the directory to be moved.
        dest : str
            The destination path where the file or directory is moved to.

        Examples
        --------
        Create a new folder with a file:

        >>> local.create_dir('/tmp/other_dir')
        >>> local.copy_file(path, '/tmp/move_example.dat')

        Move the file:

        >>> local.move('/tmp/move_example.dat',
        ...            '/tmp/other_dir/move_example_2.dat')

        Inspect the file info:

        >>> local.get_file_info('/tmp/other_dir/move_example_2.dat')
        <FileInfo for '/tmp/other_dir/move_example_2.dat': type=FileType.File, size=4>
        >>> local.get_file_info('/tmp/move_example.dat')
        <FileInfo for '/tmp/move_example.dat': type=FileType.NotFound>

        Delete the folder:

        >>> local.delete_dir('/tmp/other_dir')
        """
        cdef:
            c_string source = _path_as_bytes(src)
            c_string destination = _path_as_bytes(dest)
        with nogil:
            check_status(self.fs.Move(source, destination))

    def copy_file(self, src, dest):
        """
        Copy a file.

        If the destination exists and is a directory, an error is returned.
        Otherwise, it is replaced.

        Parameters
        ----------
        src : str
            The path of the file to be copied from.
        dest : str
            The destination path where the file is copied to.

        Examples
        --------
        >>> local.copy_file(path,
        ...                 local_path + '/pyarrow-fs-example_copy.dat')

        Inspect the file info:

        >>> local.get_file_info(local_path + '/pyarrow-fs-example_copy.dat')
        <FileInfo for '/.../pyarrow-fs-example_copy.dat': type=FileType.File, size=4>
        >>> local.get_file_info(path)
        <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
        """
        cdef:
            c_string source = _path_as_bytes(src)
            c_string destination = _path_as_bytes(dest)
        with nogil:
            check_status(self.fs.CopyFile(source, destination))

    def delete_file(self, path):
        """
        Delete a file.

        Parameters
        ----------
        path : str
            The path of the file to be deleted.
        """
        cdef c_string file = _path_as_bytes(path)
        with nogil:
            check_status(self.fs.DeleteFile(file))

    def _wrap_input_stream(self, stream, path, compression, buffer_size):
        # Optionally layer buffering and decompression on top of the raw
        # stream; compression='detect' infers the codec from the path suffix.
        if buffer_size is not None and buffer_size != 0:
            stream = BufferedInputStream(stream, buffer_size)
        if compression == 'detect':
            compression = _detect_compression(path)
        if compression is not None:
            stream = CompressedInputStream(stream, compression)
        return stream

    def _wrap_output_stream(self, stream, path, compression, buffer_size):
        # Mirror image of _wrap_input_stream for the write side.
        if buffer_size is not None and buffer_size != 0:
            stream = BufferedOutputStream(stream, buffer_size)
        if compression == 'detect':
            compression = _detect_compression(path)
        if compression is not None:
            stream = CompressedOutputStream(stream, compression)
        return stream

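    # Sketch of what the two wrappers above do: with compression='detect' the
    # path suffix selects the codec, so '.gz' data is compressed on write and
    # inflated on read without explicit codec handling ('/tmp' is assumed):
    #
    #   >>> with local.open_output_stream('/tmp/example.gz') as f:
    #   ...     f.write(b'data')
    #   4
    #   >>> with local.open_input_stream('/tmp/example.gz') as f:
    #   ...     f.read()
    #   b'data'
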
    def open_input_file(self, path):
        """
        Open an input file for random access reading.

        Parameters
        ----------
        path : str
            The source to open for reading.

        Returns
        -------
        stream : NativeFile

        Examples
        --------
        Print the data from the file with `open_input_file()`:

        >>> with local.open_input_file(path) as f:
        ...     print(f.readall())
        b'data'
        """
        cdef:
            c_string pathstr = _path_as_bytes(path)
            NativeFile stream = NativeFile()
            shared_ptr[CRandomAccessFile] in_handle

        with nogil:
            in_handle = GetResultValue(self.fs.OpenInputFile(pathstr))

        stream.set_random_access_file(in_handle)
        stream.is_readable = True
        return stream

    def open_input_stream(self, path, compression='detect', buffer_size=None):
        """
        Open an input stream for sequential reading.

        Parameters
        ----------
        path : str
            The source to open for reading.
        compression : str optional, default 'detect'
            The compression algorithm to use for on-the-fly decompression.
            If "detect" and source is a file path, then compression will be
            chosen based on the file extension.
            If None, no compression will be applied. Otherwise, a well-known
            algorithm name must be supplied (e.g. "gzip").
        buffer_size : int optional, default None
            If None or 0, no buffering will happen. Otherwise the size of the
            temporary read buffer.

        Returns
        -------
        stream : NativeFile

        Examples
        --------
        Print the data from the file with `open_input_stream()`:

        >>> with local.open_input_stream(path) as f:
        ...     print(f.readall())
        b'data'
        """
        cdef:
            c_string pathstr = _path_as_bytes(path)
            NativeFile stream = NativeFile()
            shared_ptr[CInputStream] in_handle

        with nogil:
            in_handle = GetResultValue(self.fs.OpenInputStream(pathstr))

        stream.set_input_stream(in_handle)
        stream.is_readable = True

        return self._wrap_input_stream(
            stream, path=path, compression=compression, buffer_size=buffer_size
        )

    def open_output_stream(self, path, compression='detect',
                           buffer_size=None, metadata=None):
        """
        Open an output stream for sequential writing.

        If the target already exists, existing data is truncated.

        Parameters
        ----------
        path : str
            The source to open for writing.
        compression : str optional, default 'detect'
            The compression algorithm to use for on-the-fly compression.
            If "detect" and source is a file path, then compression will be
            chosen based on the file extension.
            If None, no compression will be applied. Otherwise, a well-known
            algorithm name must be supplied (e.g. "gzip").
        buffer_size : int optional, default None
            If None or 0, no buffering will happen. Otherwise the size of the
            temporary write buffer.
        metadata : dict optional, default None
            If not None, a mapping of string keys to string values.
            Some filesystems support storing metadata along the file
            (such as "Content-Type").
            Unsupported metadata keys will be ignored.

        Returns
        -------
        stream : NativeFile

        Examples
        --------
        >>> local = fs.LocalFileSystem()
        >>> with local.open_output_stream(path) as stream:
        ...     stream.write(b'data')
        4
        """
        cdef:
            c_string pathstr = _path_as_bytes(path)
            NativeFile stream = NativeFile()
            shared_ptr[COutputStream] out_handle
            shared_ptr[const CKeyValueMetadata] c_metadata

        if metadata is not None:
            c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))

        with nogil:
            out_handle = GetResultValue(
                self.fs.OpenOutputStream(pathstr, c_metadata))

        stream.set_output_stream(out_handle)
        stream.is_writable = True

        return self._wrap_output_stream(
            stream, path=path, compression=compression, buffer_size=buffer_size
        )

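    # A sketch of the optional knobs on open_output_stream(); buffering and
    # compression are layered by _wrap_output_stream above (path is assumed):
    #
    #   >>> with local.open_output_stream('/tmp/buffered.dat',
    #   ...                               compression=None,
    #   ...                               buffer_size=64 * 1024) as f:
    #   ...     f.write(b'data')
    #   4
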
    def open_append_stream(self, path, compression='detect',
                           buffer_size=None, metadata=None):
        """
        Open an output stream for appending.

        If the target doesn't exist, a new empty file is created.

        .. note::
            Some filesystem implementations do not support efficient
            appending to an existing file, in which case this method will
            raise NotImplementedError.
            Consider writing to multiple files (using e.g. the dataset layer)
            instead.

        Parameters
        ----------
        path : str
            The source to open for writing.
        compression : str optional, default 'detect'
            The compression algorithm to use for on-the-fly compression.
            If "detect" and source is a file path, then compression will be
            chosen based on the file extension.
            If None, no compression will be applied. Otherwise, a well-known
            algorithm name must be supplied (e.g. "gzip").
        buffer_size : int optional, default None
            If None or 0, no buffering will happen. Otherwise the size of the
            temporary write buffer.
        metadata : dict optional, default None
            If not None, a mapping of string keys to string values.
            Some filesystems support storing metadata along the file
            (such as "Content-Type").
            Unsupported metadata keys will be ignored.

        Returns
        -------
        stream : NativeFile

        Examples
        --------
        Append new data to a FileSystem subclass with nonempty file:

        >>> with local.open_append_stream(path) as f:
        ...     f.write(b'+newly added')
        12

        Print out the content of the file:

        >>> with local.open_input_file(path) as f:
        ...     print(f.readall())
        b'data+newly added'
        """
        cdef:
            c_string pathstr = _path_as_bytes(path)
            NativeFile stream = NativeFile()
            shared_ptr[COutputStream] out_handle
            shared_ptr[const CKeyValueMetadata] c_metadata

        if metadata is not None:
            c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))

        with nogil:
            out_handle = GetResultValue(
                self.fs.OpenAppendStream(pathstr, c_metadata))

        stream.set_output_stream(out_handle)
        stream.is_writable = True

        return self._wrap_output_stream(
            stream, path=path, compression=compression, buffer_size=buffer_size
        )

    def normalize_path(self, path):
        """
        Normalize filesystem path.

        Parameters
        ----------
        path : str
            The path to normalize

        Returns
        -------
        normalized_path : str
            The normalized path
        """
        cdef:
            c_string c_path = _path_as_bytes(path)
            c_string c_path_normalized

        c_path_normalized = GetResultValue(self.fs.NormalizePath(c_path))
        return frombytes(c_path_normalized)

cdef class LocalFileSystem(FileSystem):
    """
    A FileSystem implementation accessing files on the local machine.

    Details such as symlinks are abstracted away (symlinks are always
    followed, except when deleting an entry).

    Parameters
    ----------
    use_mmap : bool, default False
        Whether open_input_stream and open_input_file should return
        a mmap'ed file or a regular file.

    Examples
    --------
    Create a FileSystem object with LocalFileSystem constructor:

    >>> from pyarrow import fs
    >>> local = fs.LocalFileSystem()
    >>> local
    <pyarrow._fs.LocalFileSystem object at ...>

    and write data onto the file:

    >>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
    ...     stream.write(b'data')
    4
    >>> with local.open_input_stream('/tmp/local_fs.dat') as stream:
    ...     print(stream.readall())
    b'data'

    Create a FileSystem object inferred from a URI of the saved file:

    >>> local_new, path = fs.LocalFileSystem().from_uri('/tmp/local_fs.dat')
    >>> local_new
    <pyarrow._fs.LocalFileSystem object at ...
    >>> path
    '/tmp/local_fs.dat'

    Check if FileSystems `local` and `local_new` are equal:

    >>> local.equals(local_new)
    True

    Compare two different FileSystems:

    >>> local2 = fs.LocalFileSystem(use_mmap=True)
    >>> local.equals(local2)
    False

    Copy a file and print out the data:

    >>> local.copy_file('/tmp/local_fs.dat', '/tmp/local_fs-copy.dat')
    >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as stream:
    ...     print(stream.readall())
    ...
    b'data'

    Open an output stream for appending, add text and print the new data:

    >>> with local.open_append_stream('/tmp/local_fs-copy.dat') as f:
    ...     f.write(b'+newly added')
    12

    >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as f:
    ...     print(f.readall())
    b'data+newly added'

    Create a directory, copy a file into it and then delete the whole
    directory:

    >>> local.create_dir('/tmp/new_folder')
    >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
    >>> local.get_file_info('/tmp/new_folder')
    <FileInfo for '/tmp/new_folder': type=FileType.Directory>
    >>> local.delete_dir('/tmp/new_folder')
    >>> local.get_file_info('/tmp/new_folder')
    <FileInfo for '/tmp/new_folder': type=FileType.NotFound>

    Create a directory, copy a file into it and then delete
    the content of the directory:

    >>> local.create_dir('/tmp/new_folder')
    >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
    >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
    <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.File, size=4>
    >>> local.delete_dir_contents('/tmp/new_folder')
    >>> local.get_file_info('/tmp/new_folder')
    <FileInfo for '/tmp/new_folder': type=FileType.Directory>
    >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
    <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>

    Create a directory, copy a file into it and then delete
    the file from the directory:

    >>> local.create_dir('/tmp/new_folder')
    >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
    >>> local.delete_file('/tmp/new_folder/local_fs.dat')
    >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
    <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>
    >>> local.get_file_info('/tmp/new_folder')
    <FileInfo for '/tmp/new_folder': type=FileType.Directory>

    Move the file:

    >>> local.move('/tmp/local_fs-copy.dat', '/tmp/new_folder/local_fs-copy.dat')
    >>> local.get_file_info('/tmp/new_folder/local_fs-copy.dat')
    <FileInfo for '/tmp/new_folder/local_fs-copy.dat': type=FileType.File, size=16>
    >>> local.get_file_info('/tmp/local_fs-copy.dat')
    <FileInfo for '/tmp/local_fs-copy.dat': type=FileType.NotFound>

    Finally, delete the remaining file:

    >>> local.delete_file('/tmp/local_fs.dat')
    """

    def __init__(self, *, use_mmap=False):
        cdef:
            CLocalFileSystemOptions opts
            shared_ptr[CLocalFileSystem] fs

        opts = CLocalFileSystemOptions.Defaults()
        opts.use_mmap = use_mmap

        fs = make_shared[CLocalFileSystem](opts)
        self.init(<shared_ptr[CFileSystem]> fs)

    cdef init(self, const shared_ptr[CFileSystem]& c_fs):
        FileSystem.init(self, c_fs)
        self.localfs = <CLocalFileSystem*> c_fs.get()

    @staticmethod
    @binding(True)  # Required for cython < 3
    def _reconstruct(kwargs):
        # __reduce__ doesn't allow passing named arguments directly to the
        # reconstructor, hence this wrapper.
        return LocalFileSystem(**kwargs)

    def __reduce__(self):
        cdef CLocalFileSystemOptions opts = self.localfs.options()
        return LocalFileSystem._reconstruct, (dict(
            use_mmap=opts.use_mmap),)

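# Thanks to _reconstruct/__reduce__ above, LocalFileSystem round-trips
# through pickle with its options intact; a quick sketch:
#
#   >>> import pickle
#   >>> fs1 = LocalFileSystem(use_mmap=True)
#   >>> pickle.loads(pickle.dumps(fs1)).equals(fs1)
#   True
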
1123 |
+
cdef class SubTreeFileSystem(FileSystem):
|
1124 |
+
"""
|
1125 |
+
Delegates to another implementation after prepending a fixed base path.
|
1126 |
+
|
1127 |
+
This is useful to expose a logical view of a subtree of a filesystem,
|
1128 |
+
for example a directory in a LocalFileSystem.
|
1129 |
+
|
1130 |
+
Note, that this makes no security guarantee. For example, symlinks may
|
1131 |
+
allow to "escape" the subtree and access other parts of the underlying
|
1132 |
+
filesystem.
|
1133 |
+
|
1134 |
+
Parameters
|
1135 |
+
----------
|
1136 |
+
base_path : str
|
1137 |
+
The root of the subtree.
|
1138 |
+
base_fs : FileSystem
|
1139 |
+
FileSystem object the operations delegated to.
|
1140 |
+
|
1141 |
+
Examples
|
1142 |
+
--------
|
1143 |
+
Create a LocalFileSystem instance:
|
1144 |
+
|
1145 |
+
>>> from pyarrow import fs
|
1146 |
+
>>> local = fs.LocalFileSystem()
|
1147 |
+
>>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
|
1148 |
+
... stream.write(b'data')
|
1149 |
+
4
|
1150 |
+
|
1151 |
+
Create a directory and a SubTreeFileSystem instance:
|
1152 |
+
|
1153 |
+
>>> local.create_dir('/tmp/sub_tree')
|
1154 |
+
>>> subtree = fs.SubTreeFileSystem('/tmp/sub_tree', local)
|
1155 |
+
|
1156 |
+
Write data into the existing file:
|
1157 |
+
|
1158 |
+
>>> with subtree.open_append_stream('sub_tree_fs.dat') as f:
|
1159 |
+
... f.write(b'+newly added')
|
1160 |
+
12
|
1161 |
+
|
1162 |
+
Print out the attributes:
|
1163 |
+
|
1164 |
+
>>> subtree.base_fs
|
1165 |
+
<pyarrow._fs.LocalFileSystem object at ...>
|
1166 |
+
>>> subtree.base_path
|
1167 |
+
'/tmp/sub_tree/'
|
1168 |
+
|
1169 |
+
Get info for the given directory or given file:
|
1170 |
+
|
1171 |
+
>>> subtree.get_file_info('')
|
1172 |
+
<FileInfo for '': type=FileType.Directory>
|
1173 |
+
>>> subtree.get_file_info('sub_tree_fs.dat')
|
1174 |
+
<FileInfo for 'sub_tree_fs.dat': type=FileType.File, size=12>
|
1175 |
+
|
1176 |
+
Delete the file and directory:
|
1177 |
+
|
1178 |
+
>>> subtree.delete_file('sub_tree_fs.dat')
|
1179 |
+
>>> local.delete_dir('/tmp/sub_tree')
|
1180 |
+
>>> local.delete_file('/tmp/local_fs.dat')
|
1181 |
+
|
1182 |
+
For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
|
1183 |
+
"""
|
1184 |
+
|
1185 |
+
def __init__(self, base_path, FileSystem base_fs):
|
1186 |
+
cdef:
|
1187 |
+
c_string pathstr
|
1188 |
+
shared_ptr[CSubTreeFileSystem] wrapped
|
1189 |
+
|
1190 |
+
pathstr = _path_as_bytes(base_path)
|
1191 |
+
wrapped = make_shared[CSubTreeFileSystem](pathstr, base_fs.wrapped)
|
1192 |
+
|
1193 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
1194 |
+
|
1195 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
1196 |
+
FileSystem.init(self, wrapped)
|
1197 |
+
self.subtreefs = <CSubTreeFileSystem*> wrapped.get()
|
1198 |
+
|
1199 |
+
def __repr__(self):
|
1200 |
+
return ("SubTreeFileSystem(base_path={}, base_fs={}"
|
1201 |
+
.format(self.base_path, self.base_fs))
|
1202 |
+
|
1203 |
+
def __reduce__(self):
|
1204 |
+
return SubTreeFileSystem, (
|
1205 |
+
frombytes(self.subtreefs.base_path()),
|
1206 |
+
FileSystem.wrap(self.subtreefs.base_fs())
|
1207 |
+
)
|
1208 |
+
|
1209 |
+
@property
|
1210 |
+
def base_path(self):
|
1211 |
+
return frombytes(self.subtreefs.base_path())
|
1212 |
+
|
1213 |
+
@property
|
1214 |
+
def base_fs(self):
|
1215 |
+
return FileSystem.wrap(self.subtreefs.base_fs())
|
1216 |
+
|
1217 |
+
|
1218 |
+
cdef class _MockFileSystem(FileSystem):
|
1219 |
+
|
1220 |
+
def __init__(self, datetime current_time=None):
|
1221 |
+
cdef shared_ptr[CMockFileSystem] wrapped
|
1222 |
+
|
1223 |
+
current_time = current_time or datetime.now()
|
1224 |
+
wrapped = make_shared[CMockFileSystem](
|
1225 |
+
PyDateTime_to_TimePoint(<PyDateTime_DateTime*> current_time)
|
1226 |
+
)
|
1227 |
+
|
1228 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
1229 |
+
|
1230 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
1231 |
+
FileSystem.init(self, wrapped)
|
1232 |
+
self.mockfs = <CMockFileSystem*> wrapped.get()
|
1233 |
+
|
1234 |
+
|
cdef class PyFileSystem(FileSystem):
    """
    A FileSystem with behavior implemented in Python.

    Parameters
    ----------
    handler : FileSystemHandler
        The handler object implementing custom filesystem behavior.

    Examples
    --------
    Create an fsspec-based filesystem object for GitHub:

    >>> from fsspec.implementations import github
    >>> gfs = github.GithubFileSystem('apache', 'arrow') # doctest: +SKIP

    Get a PyArrow FileSystem object:

    >>> from pyarrow.fs import PyFileSystem, FSSpecHandler
    >>> pa_fs = PyFileSystem(FSSpecHandler(gfs)) # doctest: +SKIP

    Use :func:`~pyarrow.fs.FileSystem` functionality ``get_file_info()``:

    >>> pa_fs.get_file_info('README.md') # doctest: +SKIP
    <FileInfo for 'README.md': type=FileType.File, size=...>
    """

    def __init__(self, handler):
        cdef:
            CPyFileSystemVtable vtable
            shared_ptr[CPyFileSystem] wrapped

        if not isinstance(handler, FileSystemHandler):
            raise TypeError("Expected a FileSystemHandler instance, got {0}"
                            .format(type(handler)))

        vtable.get_type_name = _cb_get_type_name
        vtable.equals = _cb_equals
        vtable.get_file_info = _cb_get_file_info
        vtable.get_file_info_vector = _cb_get_file_info_vector
        vtable.get_file_info_selector = _cb_get_file_info_selector
        vtable.create_dir = _cb_create_dir
        vtable.delete_dir = _cb_delete_dir
        vtable.delete_dir_contents = _cb_delete_dir_contents
        vtable.delete_root_dir_contents = _cb_delete_root_dir_contents
        vtable.delete_file = _cb_delete_file
        vtable.move = _cb_move
        vtable.copy_file = _cb_copy_file
        vtable.open_input_stream = _cb_open_input_stream
        vtable.open_input_file = _cb_open_input_file
        vtable.open_output_stream = _cb_open_output_stream
        vtable.open_append_stream = _cb_open_append_stream
        vtable.normalize_path = _cb_normalize_path

        wrapped = CPyFileSystem.Make(handler, move(vtable))
        self.init(<shared_ptr[CFileSystem]> wrapped)

    cdef init(self, const shared_ptr[CFileSystem]& wrapped):
        FileSystem.init(self, wrapped)
        self.pyfs = <CPyFileSystem*> wrapped.get()

    @property
    def handler(self):
        """
        The filesystem's underlying handler.

        Returns
        -------
        handler : FileSystemHandler
        """
        return <object> self.pyfs.handler()

    def __reduce__(self):
        return PyFileSystem, (self.handler,)

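# A minimal sketch of wiring Python code into PyFileSystem: subclass the
# FileSystemHandler ABC defined below and pass an instance to PyFileSystem.
# 'EchoHandler' and its trivial behavior are illustrative only, and every
# remaining abstract method still has to be implemented:
#
#   >>> class EchoHandler(FileSystemHandler):
#   ...     def get_type_name(self):
#   ...         return 'echo'
#   ...     def normalize_path(self, path):
#   ...         return path
#   ...     # ... implement the other abstract methods as well ...
#   >>> pa_fs = PyFileSystem(EchoHandler())   # doctest: +SKIP
#   >>> pa_fs.type_name                       # doctest: +SKIP
#   'py::echo'
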
class FileSystemHandler(ABC):
    """
    An abstract class exposing methods to implement PyFileSystem's behavior.
    """

    @abstractmethod
    def get_type_name(self):
        """
        Implement PyFileSystem.type_name.
        """

    @abstractmethod
    def get_file_info(self, paths):
        """
        Implement PyFileSystem.get_file_info(paths).

        Parameters
        ----------
        paths : list of str
            paths for which we want to retrieve the info.
        """

    @abstractmethod
    def get_file_info_selector(self, selector):
        """
        Implement PyFileSystem.get_file_info(selector).

        Parameters
        ----------
        selector : FileSelector
            selector for which we want to retrieve the info.
        """

    @abstractmethod
    def create_dir(self, path, recursive):
        """
        Implement PyFileSystem.create_dir(...).

        Parameters
        ----------
        path : str
            path of the directory.
        recursive : bool
            if the parent directories should be created too.
        """

    @abstractmethod
    def delete_dir(self, path):
        """
        Implement PyFileSystem.delete_dir(...).

        Parameters
        ----------
        path : str
            path of the directory.
        """

    @abstractmethod
    def delete_dir_contents(self, path, missing_dir_ok=False):
        """
        Implement PyFileSystem.delete_dir_contents(...).

        Parameters
        ----------
        path : str
            path of the directory.
        missing_dir_ok : bool
            if False an error should be raised if path does not exist
        """

    @abstractmethod
    def delete_root_dir_contents(self):
        """
        Implement PyFileSystem.delete_dir_contents("/", accept_root_dir=True).
        """

    @abstractmethod
    def delete_file(self, path):
        """
        Implement PyFileSystem.delete_file(...).

        Parameters
        ----------
        path : str
            path of the file.
        """

    @abstractmethod
    def move(self, src, dest):
        """
        Implement PyFileSystem.move(...).

        Parameters
        ----------
        src : str
            path of what should be moved.
        dest : str
            path of where it should be moved to.
        """

    @abstractmethod
    def copy_file(self, src, dest):
        """
        Implement PyFileSystem.copy_file(...).

        Parameters
        ----------
        src : str
            path of what should be copied.
        dest : str
            path of where it should be copied to.
        """

    @abstractmethod
    def open_input_stream(self, path):
        """
        Implement PyFileSystem.open_input_stream(...).

        Parameters
        ----------
        path : str
            path of what should be opened.
        """

    @abstractmethod
    def open_input_file(self, path):
        """
        Implement PyFileSystem.open_input_file(...).

        Parameters
        ----------
        path : str
            path of what should be opened.
        """

    @abstractmethod
    def open_output_stream(self, path, metadata):
        """
        Implement PyFileSystem.open_output_stream(...).

        Parameters
        ----------
        path : str
            path of what should be opened.
        metadata : mapping
            Mapping of string keys to string values.
            Some filesystems support storing metadata along the file
            (such as "Content-Type").
        """

    @abstractmethod
    def open_append_stream(self, path, metadata):
        """
        Implement PyFileSystem.open_append_stream(...).

        Parameters
        ----------
        path : str
            path of what should be opened.
        metadata : mapping
            Mapping of string keys to string values.
            Some filesystems support storing metadata along the file
            (such as "Content-Type").
        """

    @abstractmethod
    def normalize_path(self, path):
        """
        Implement PyFileSystem.normalize_path(...).

        Parameters
        ----------
        path : str
            path of what should be normalized.
        """

# Callback definitions for CPyFileSystemVtable

cdef void _cb_get_type_name(handler, c_string* out) except *:
    out[0] = tobytes("py::" + handler.get_type_name())

cdef c_bool _cb_equals(handler, const CFileSystem& c_other) except False:
    if c_other.type_name().startswith(b"py::"):
        return <object> (<const CPyFileSystem&> c_other).handler() == handler

    return False

cdef void _cb_get_file_info(handler, const c_string& path,
                            CFileInfo* out) except *:
    infos = handler.get_file_info([frombytes(path)])
    if not isinstance(infos, list) or len(infos) != 1:
        raise TypeError("get_file_info should have returned a 1-element list")
    out[0] = FileInfo.unwrap_safe(infos[0])

cdef void _cb_get_file_info_vector(handler, const vector[c_string]& paths,
                                   vector[CFileInfo]* out) except *:
    py_paths = [frombytes(paths[i]) for i in range(len(paths))]
    infos = handler.get_file_info(py_paths)
    if not isinstance(infos, list):
        raise TypeError("get_file_info should have returned a list")
    out[0].clear()
    out[0].reserve(len(infos))
    for info in infos:
        out[0].push_back(FileInfo.unwrap_safe(info))

cdef void _cb_get_file_info_selector(handler, const CFileSelector& selector,
                                     vector[CFileInfo]* out) except *:
    infos = handler.get_file_info_selector(FileSelector.wrap(selector))
    if not isinstance(infos, list):
        raise TypeError("get_file_info_selector should have returned a list")
    out[0].clear()
    out[0].reserve(len(infos))
    for info in infos:
        out[0].push_back(FileInfo.unwrap_safe(info))

cdef void _cb_create_dir(handler, const c_string& path,
                         c_bool recursive) except *:
    handler.create_dir(frombytes(path), recursive)

cdef void _cb_delete_dir(handler, const c_string& path) except *:
    handler.delete_dir(frombytes(path))

cdef void _cb_delete_dir_contents(handler, const c_string& path,
                                  c_bool missing_dir_ok) except *:
    handler.delete_dir_contents(frombytes(path), missing_dir_ok)

cdef void _cb_delete_root_dir_contents(handler) except *:
    handler.delete_root_dir_contents()

cdef void _cb_delete_file(handler, const c_string& path) except *:
    handler.delete_file(frombytes(path))

cdef void _cb_move(handler, const c_string& src,
                   const c_string& dest) except *:
    handler.move(frombytes(src), frombytes(dest))

cdef void _cb_copy_file(handler, const c_string& src,
                        const c_string& dest) except *:
    handler.copy_file(frombytes(src), frombytes(dest))

cdef void _cb_open_input_stream(handler, const c_string& path,
                                shared_ptr[CInputStream]* out) except *:
    stream = handler.open_input_stream(frombytes(path))
    if not isinstance(stream, NativeFile):
        raise TypeError("open_input_stream should have returned "
                        "a PyArrow file")
    out[0] = (<NativeFile> stream).get_input_stream()

cdef void _cb_open_input_file(handler, const c_string& path,
                              shared_ptr[CRandomAccessFile]* out) except *:
    stream = handler.open_input_file(frombytes(path))
    if not isinstance(stream, NativeFile):
        raise TypeError("open_input_file should have returned "
                        "a PyArrow file")
    out[0] = (<NativeFile> stream).get_random_access_file()

cdef void _cb_open_output_stream(
        handler, const c_string& path,
        const shared_ptr[const CKeyValueMetadata]& metadata,
        shared_ptr[COutputStream]* out) except *:
    stream = handler.open_output_stream(
        frombytes(path), pyarrow_wrap_metadata(metadata))
    if not isinstance(stream, NativeFile):
        raise TypeError("open_output_stream should have returned "
                        "a PyArrow file")
    out[0] = (<NativeFile> stream).get_output_stream()

cdef void _cb_open_append_stream(
        handler, const c_string& path,
        const shared_ptr[const CKeyValueMetadata]& metadata,
        shared_ptr[COutputStream]* out) except *:
    stream = handler.open_append_stream(
        frombytes(path), pyarrow_wrap_metadata(metadata))
    if not isinstance(stream, NativeFile):
        raise TypeError("open_append_stream should have returned "
                        "a PyArrow file")
    out[0] = (<NativeFile> stream).get_output_stream()

cdef void _cb_normalize_path(handler, const c_string& path,
                             c_string* out) except *:
    out[0] = tobytes(handler.normalize_path(frombytes(path)))

def _copy_files(FileSystem source_fs, str source_path,
                FileSystem destination_fs, str destination_path,
                int64_t chunk_size, c_bool use_threads):
    # low-level helper exposed through pyarrow/fs.py::copy_files
    cdef:
        CFileLocator c_source
        vector[CFileLocator] c_sources
        CFileLocator c_destination
        vector[CFileLocator] c_destinations

    c_source.filesystem = source_fs.unwrap()
    c_source.path = tobytes(source_path)
    c_sources.push_back(c_source)

    c_destination.filesystem = destination_fs.unwrap()
    c_destination.path = tobytes(destination_path)
    c_destinations.push_back(c_destination)

    with nogil:
        check_status(CCopyFiles(
            c_sources, c_destinations,
            c_default_io_context(), chunk_size, use_threads,
        ))


def _copy_files_selector(FileSystem source_fs, FileSelector source_sel,
                         FileSystem destination_fs, str destination_base_dir,
                         int64_t chunk_size, c_bool use_threads):
    # low-level helper exposed through pyarrow/fs.py::copy_files
    cdef c_string c_destination_base_dir = tobytes(destination_base_dir)

    with nogil:
        check_status(CCopyFilesWithSelector(
            source_fs.unwrap(), source_sel.unwrap(),
            destination_fs.unwrap(), c_destination_base_dir,
            c_default_io_context(), chunk_size, use_threads,
        ))
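# These two helpers back the public pyarrow.fs.copy_files() API; a short
# sketch of the user-facing call (the paths are assumptions):
#
#   >>> from pyarrow import fs
#   >>> fs.copy_files('/tmp/src_dir', '/tmp/dst_dir',
#   ...               source_filesystem=fs.LocalFileSystem(),
#   ...               destination_filesystem=fs.LocalFileSystem())
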
env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pyx
ADDED
@@ -0,0 +1,310 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: profile=False
# distutils: language = c++
# cython: language_level = 3

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.lib cimport (_Weakrefable, MemoryPool,
                          maybe_unbox_memory_pool,
                          get_input_stream, pyarrow_wrap_table,
                          pyarrow_wrap_schema, pyarrow_unwrap_schema)


cdef class ReadOptions(_Weakrefable):
    """
    Options for reading JSON files.

    Parameters
    ----------
    use_threads : bool, optional (default True)
        Whether to use multiple threads to accelerate reading.
    block_size : int, optional
        How many bytes to process at a time from the input stream.
        This will determine multi-threading granularity as well as
        the size of individual chunks in the Table.
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, use_threads=None, block_size=None):
        self.options = CJSONReadOptions.Defaults()
        if use_threads is not None:
            self.use_threads = use_threads
        if block_size is not None:
            self.block_size = block_size

    @property
    def use_threads(self):
        """
        Whether to use multiple threads to accelerate reading.
        """
        return self.options.use_threads

    @use_threads.setter
    def use_threads(self, value):
        self.options.use_threads = value

    @property
    def block_size(self):
        """
        How many bytes to process at a time from the input stream.

        This will determine multi-threading granularity as well as the size of
        individual chunks in the Table.
        """
        return self.options.block_size

    @block_size.setter
    def block_size(self, value):
        self.options.block_size = value

    def __reduce__(self):
        return ReadOptions, (
            self.use_threads,
            self.block_size
        )

    def equals(self, ReadOptions other):
        """
        Parameters
        ----------
        other : pyarrow.json.ReadOptions

        Returns
        -------
        bool
        """
        return (
            self.use_threads == other.use_threads and
            self.block_size == other.block_size
        )

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False

    @staticmethod
    cdef ReadOptions wrap(CJSONReadOptions options):
        out = ReadOptions()
        out.options = options  # shallow copy
        return out

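# A small sketch of ReadOptions in use (the values are illustrative):
#
#   >>> from pyarrow import json
#   >>> opts = json.ReadOptions(use_threads=True, block_size=1 << 20)
#   >>> opts.block_size
#   1048576
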
cdef class ParseOptions(_Weakrefable):
    """
    Options for parsing JSON files.

    Parameters
    ----------
    explicit_schema : Schema, optional (default None)
        Optional explicit schema (no type inference, ignores other fields).
    newlines_in_values : bool, optional (default False)
        Whether objects may be printed across multiple lines (for example
        pretty printed). If false, input must end with an empty line.
    unexpected_field_behavior : str, default "infer"
        How JSON fields outside of explicit_schema (if given) are treated.

        Possible behaviors:

        - "ignore": unexpected JSON fields are ignored
        - "error": error out on unexpected JSON fields
        - "infer": unexpected JSON fields are type-inferred and included in
          the output
    """

    __slots__ = ()

    def __init__(self, explicit_schema=None, newlines_in_values=None,
                 unexpected_field_behavior=None):
        self.options = CJSONParseOptions.Defaults()
        if explicit_schema is not None:
            self.explicit_schema = explicit_schema
        if newlines_in_values is not None:
            self.newlines_in_values = newlines_in_values
        if unexpected_field_behavior is not None:
            self.unexpected_field_behavior = unexpected_field_behavior

    def __reduce__(self):
        return ParseOptions, (
            self.explicit_schema,
            self.newlines_in_values,
            self.unexpected_field_behavior
        )

    @property
    def explicit_schema(self):
        """
        Optional explicit schema (no type inference, ignores other fields).
        """
        if self.options.explicit_schema.get() == NULL:
            return None
        else:
            return pyarrow_wrap_schema(self.options.explicit_schema)

    @explicit_schema.setter
    def explicit_schema(self, value):
        self.options.explicit_schema = pyarrow_unwrap_schema(value)

    @property
    def newlines_in_values(self):
        """
        Whether newline characters are allowed in JSON values.
        Setting this to True reduces the performance of multi-threaded
        JSON reading.
        """
        return self.options.newlines_in_values

    @newlines_in_values.setter
    def newlines_in_values(self, value):
        self.options.newlines_in_values = value

    @property
    def unexpected_field_behavior(self):
        """
        How JSON fields outside of explicit_schema (if given) are treated.

        Possible behaviors:

        - "ignore": unexpected JSON fields are ignored
        - "error": error out on unexpected JSON fields
        - "infer": unexpected JSON fields are type-inferred and included in
          the output

        Set to "infer" by default.
        """
        v = self.options.unexpected_field_behavior
        if v == CUnexpectedFieldBehavior_Ignore:
            return "ignore"
        elif v == CUnexpectedFieldBehavior_Error:
            return "error"
        elif v == CUnexpectedFieldBehavior_InferType:
            return "infer"
        else:
            raise ValueError('Unexpected value for unexpected_field_behavior')

    @unexpected_field_behavior.setter
    def unexpected_field_behavior(self, value):
        cdef CUnexpectedFieldBehavior v

        if value == "ignore":
            v = CUnexpectedFieldBehavior_Ignore
        elif value == "error":
            v = CUnexpectedFieldBehavior_Error
        elif value == "infer":
            v = CUnexpectedFieldBehavior_InferType
        else:
            raise ValueError(
                "Unexpected value `{}` for `unexpected_field_behavior`, pass "
                "either `ignore`, `error` or `infer`.".format(value)
            )

        self.options.unexpected_field_behavior = v

    def equals(self, ParseOptions other):
        """
        Parameters
        ----------
        other : pyarrow.json.ParseOptions

        Returns
        -------
        bool
        """
        return (
            self.explicit_schema == other.explicit_schema and
            self.newlines_in_values == other.newlines_in_values and
            self.unexpected_field_behavior == other.unexpected_field_behavior
        )

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False

    @staticmethod
    cdef ParseOptions wrap(CJSONParseOptions options):
        out = ParseOptions()
        out.options = options  # shallow copy
        return out

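# A sketch of ParseOptions with an explicit schema; the one-column schema
# and chosen behavior are illustrative:
#
#   >>> import pyarrow as pa
#   >>> from pyarrow import json
#   >>> popts = json.ParseOptions(
#   ...     explicit_schema=pa.schema([('x', pa.int64())]),
#   ...     unexpected_field_behavior='ignore')
#   >>> popts.unexpected_field_behavior
#   'ignore'
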
cdef _get_reader(input_file, shared_ptr[CInputStream]* out):
    use_memory_map = False
    get_input_stream(input_file, use_memory_map, out)

cdef _get_read_options(ReadOptions read_options, CJSONReadOptions* out):
    if read_options is None:
        out[0] = CJSONReadOptions.Defaults()
    else:
        out[0] = read_options.options

cdef _get_parse_options(ParseOptions parse_options, CJSONParseOptions* out):
    if parse_options is None:
        out[0] = CJSONParseOptions.Defaults()
    else:
        out[0] = parse_options.options


def read_json(input_file, read_options=None, parse_options=None,
              MemoryPool memory_pool=None):
    """
    Read a Table from a stream of JSON data.

    Parameters
    ----------
    input_file : str, path or file-like object
        The location of JSON data. Currently only the line-delimited JSON
        format is supported.
    read_options : pyarrow.json.ReadOptions, optional
        Options for the JSON reader (see ReadOptions constructor for
        defaults).
    parse_options : pyarrow.json.ParseOptions, optional
        Options for the JSON parser
        (see ParseOptions constructor for defaults).
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from.

    Returns
    -------
    :class:`pyarrow.Table`
        Contents of the JSON file as an in-memory table.
    """
    cdef:
        shared_ptr[CInputStream] stream
        CJSONReadOptions c_read_options
        CJSONParseOptions c_parse_options
        shared_ptr[CJSONReader] reader
        shared_ptr[CTable] table

    _get_reader(input_file, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)

    reader = GetResultValue(
        CJSONReader.Make(maybe_unbox_memory_pool(memory_pool),
                         stream, c_read_options, c_parse_options))

    with nogil:
        table = GetResultValue(reader.get().Read())

    return pyarrow_wrap_table(table)
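# A minimal end-to-end sketch of read_json() on line-delimited input (the
# in-memory buffer stands in for a file path):
#
#   >>> import io
#   >>> from pyarrow import json
#   >>> buf = io.BytesIO(b'{"x": 1}\n{"x": 2}\n')
#   >>> json.read_json(buf).column('x').to_pylist()
#   [1, 2]
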
env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pyx
ADDED
@@ -0,0 +1,445 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: profile=False
+# distutils: language = c++
+
+from cython.operator cimport dereference as deref
+from libcpp.vector cimport vector as std_vector
+from libcpp.utility cimport move
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.lib cimport (check_status, _Weakrefable,
+                          MemoryPool, maybe_unbox_memory_pool,
+                          pyarrow_wrap_schema,
+                          pyarrow_wrap_batch,
+                          Table,
+                          pyarrow_wrap_table,
+                          pyarrow_wrap_metadata,
+                          pyarrow_unwrap_table,
+                          get_reader,
+                          get_writer)
+from pyarrow.lib import frombytes, tobytes
+from pyarrow.util import _stringify_path
+
+
+cdef compression_type_from_enum(CCompressionType compression_type):
+    compression_map = {
+        CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
+        CCompressionType_GZIP: 'ZLIB',
+        CCompressionType_SNAPPY: 'SNAPPY',
+        CCompressionType_LZ4: 'LZ4',
+        CCompressionType_ZSTD: 'ZSTD',
+    }
+    if compression_type in compression_map:
+        return compression_map[compression_type]
+    raise ValueError('Unsupported compression')
+
+
+cdef CCompressionType compression_type_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression must be a string')
+    name = name.upper()
+    if name == 'ZLIB':
+        return CCompressionType_GZIP
+    elif name == 'SNAPPY':
+        return CCompressionType_SNAPPY
+    elif name == 'LZ4':
+        return CCompressionType_LZ4
+    elif name == 'ZSTD':
+        return CCompressionType_ZSTD
+    elif name == 'UNCOMPRESSED':
+        return CCompressionType_UNCOMPRESSED
+    raise ValueError(f'Unknown CompressionKind: {name}')
+
+
+cdef compression_strategy_from_enum(
+        CompressionStrategy compression_strategy
+):
+    compression_strategy_map = {
+        _CompressionStrategy_SPEED: 'SPEED',
+        _CompressionStrategy_COMPRESSION: 'COMPRESSION',
+    }
+    if compression_strategy in compression_strategy_map:
+        return compression_strategy_map[compression_strategy]
+    raise ValueError('Unsupported compression strategy')
+
+
+cdef CompressionStrategy compression_strategy_from_name(name) except *:
+    if not isinstance(name, str):
+        raise TypeError('compression strategy must be a string')
+    name = name.upper()
+    if name == 'COMPRESSION':
+        return _CompressionStrategy_COMPRESSION
+    elif name == 'SPEED':
+        return _CompressionStrategy_SPEED
+    raise ValueError(f'Unknown CompressionStrategy: {name}')
+
+
+cdef file_version_from_class(FileVersion file_version):
+    return frombytes(file_version.ToString())
+
+
+cdef writer_id_from_enum(WriterId writer_id):
+    writer_id_map = {
+        _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
+        _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
+        _WriterId_PRESTO_WRITER: 'PRESTO',
+        _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
+        _WriterId_TRINO_WRITER: 'TRINO',
+    }
+    if writer_id in writer_id_map:
+        return writer_id_map[writer_id]
+    raise ValueError('Unsupported writer ID')
+
+
+cdef writer_version_from_enum(WriterVersion writer_version):
+    writer_version_map = {
+        _WriterVersion_ORIGINAL: 'ORIGINAL',
+        _WriterVersion_HIVE_8732: 'HIVE_8732',
+        _WriterVersion_HIVE_4243: 'HIVE_4243',
+        _WriterVersion_HIVE_12055: 'HIVE_12055',
+        _WriterVersion_HIVE_13083: 'HIVE_13083',
+        _WriterVersion_ORC_101: 'ORC_101',
+        _WriterVersion_ORC_135: 'ORC_135',
+        _WriterVersion_ORC_517: 'ORC_517',
+        _WriterVersion_ORC_203: 'ORC_203',
+        _WriterVersion_ORC_14: 'ORC_14',
+    }
+    if writer_version in writer_version_map:
+        return writer_version_map[writer_version]
+    raise ValueError('Unsupported writer version')
+
+
+cdef shared_ptr[WriteOptions] _create_write_options(
+        file_version=None,
+        batch_size=None,
+        stripe_size=None,
+        compression=None,
+        compression_block_size=None,
+        compression_strategy=None,
+        row_index_stride=None,
+        padding_tolerance=None,
+        dictionary_key_size_threshold=None,
+        bloom_filter_columns=None,
+        bloom_filter_fpp=None
+) except *:
+    """General writer options"""
+    cdef:
+        shared_ptr[WriteOptions] options
+    options = make_shared[WriteOptions]()
+    # batch_size
+    if batch_size is not None:
+        if isinstance(batch_size, int) and batch_size > 0:
+            deref(options).batch_size = batch_size
+        else:
+            raise ValueError(f"Invalid ORC writer batch size: {batch_size}")
+    # file_version
+    if file_version is not None:
+        if file_version == "0.12":
+            deref(options).file_version = FileVersion(0, 12)
+        elif file_version == "0.11":
+            deref(options).file_version = FileVersion(0, 11)
+        else:
+            raise ValueError(f"Unsupported ORC file version: {file_version}")
+    # stripe_size
+    if stripe_size is not None:
+        if isinstance(stripe_size, int) and stripe_size > 0:
+            deref(options).stripe_size = stripe_size
+        else:
+            raise ValueError(f"Invalid ORC stripe size: {stripe_size}")
+    # compression
+    if compression is not None:
+        if isinstance(compression, str):
+            deref(options).compression = compression_type_from_name(
+                compression)
+        else:
+            raise TypeError("Unsupported ORC compression type: "
+                            f"{compression}")
+    # compression_block_size
+    if compression_block_size is not None:
+        if (isinstance(compression_block_size, int) and
+                compression_block_size > 0):
+            deref(options).compression_block_size = compression_block_size
+        else:
+            raise ValueError("Invalid ORC compression block size: "
+                             f"{compression_block_size}")
+    # compression_strategy
+    if compression_strategy is not None:
+        if isinstance(compression_strategy, str):
+            deref(options).compression_strategy = \
+                compression_strategy_from_name(compression_strategy)
+        else:
+            raise TypeError("Unsupported ORC compression strategy: "
+                            f"{compression_strategy}")
+    # row_index_stride
+    if row_index_stride is not None:
+        if isinstance(row_index_stride, int) and row_index_stride > 0:
+            deref(options).row_index_stride = row_index_stride
+        else:
+            raise ValueError("Invalid ORC row index stride: "
+                             f"{row_index_stride}")
+    # padding_tolerance
+    if padding_tolerance is not None:
+        try:
+            padding_tolerance = float(padding_tolerance)
+            deref(options).padding_tolerance = padding_tolerance
+        except Exception:
+            raise ValueError("Invalid ORC padding tolerance: "
+                             f"{padding_tolerance}")
+    # dictionary_key_size_threshold
+    if dictionary_key_size_threshold is not None:
+        try:
+            dictionary_key_size_threshold = float(
+                dictionary_key_size_threshold)
+            assert 0 <= dictionary_key_size_threshold <= 1
+            deref(options).dictionary_key_size_threshold = \
+                dictionary_key_size_threshold
+        except Exception:
+            raise ValueError("Invalid ORC dictionary key size threshold: "
+                             f"{dictionary_key_size_threshold}")
+    # bloom_filter_columns
+    if bloom_filter_columns is not None:
+        try:
+            bloom_filter_columns = list(bloom_filter_columns)
+            for col in bloom_filter_columns:
+                assert isinstance(col, int) and col >= 0
+            deref(options).bloom_filter_columns = bloom_filter_columns
+        except Exception:
+            raise ValueError("Invalid ORC BloomFilter columns: "
+                             f"{bloom_filter_columns}")
+    # Max false positive rate of the Bloom Filter
+    if bloom_filter_fpp is not None:
+        try:
+            bloom_filter_fpp = float(bloom_filter_fpp)
+            assert 0 <= bloom_filter_fpp <= 1
+            deref(options).bloom_filter_fpp = bloom_filter_fpp
+        except Exception:
+            raise ValueError("Invalid ORC BloomFilter false positive rate: "
+                             f"{bloom_filter_fpp}")
+    return options
+
+
+cdef class ORCReader(_Weakrefable):
+    cdef:
+        object source
+        CMemoryPool* allocator
+        unique_ptr[ORCFileReader] reader
+
+    def __cinit__(self, MemoryPool memory_pool=None):
+        self.allocator = maybe_unbox_memory_pool(memory_pool)
+
+    def open(self, object source, c_bool use_memory_map=True):
+        cdef:
+            shared_ptr[CRandomAccessFile] rd_handle
+
+        self.source = source
+
+        get_reader(source, use_memory_map, &rd_handle)
+        with nogil:
+            self.reader = move(GetResultValue(
+                ORCFileReader.Open(rd_handle, self.allocator)
+            ))
+
+    def metadata(self):
+        """
+        The arrow metadata for this file.
+
+        Returns
+        -------
+        metadata : pyarrow.KeyValueMetadata
+        """
+        cdef:
+            shared_ptr[const CKeyValueMetadata] sp_arrow_metadata
+
+        with nogil:
+            sp_arrow_metadata = GetResultValue(
+                deref(self.reader).ReadMetadata()
+            )
+
+        return pyarrow_wrap_metadata(sp_arrow_metadata)
+
+    def schema(self):
+        """
+        The arrow schema for this file.
+
+        Returns
+        -------
+        schema : pyarrow.Schema
+        """
+        cdef:
+            shared_ptr[CSchema] sp_arrow_schema
+
+        with nogil:
+            sp_arrow_schema = GetResultValue(deref(self.reader).ReadSchema())
+
+        return pyarrow_wrap_schema(sp_arrow_schema)
+
+    def nrows(self):
+        return deref(self.reader).NumberOfRows()
+
+    def nstripes(self):
+        return deref(self.reader).NumberOfStripes()
+
+    def file_version(self):
+        return file_version_from_class(deref(self.reader).GetFileVersion())
+
+    def software_version(self):
+        return frombytes(deref(self.reader).GetSoftwareVersion())
+
+    def compression(self):
+        return compression_type_from_enum(
+            GetResultValue(deref(self.reader).GetCompression()))
+
+    def compression_size(self):
+        return deref(self.reader).GetCompressionSize()
+
+    def row_index_stride(self):
+        return deref(self.reader).GetRowIndexStride()
+
+    def writer(self):
+        writer_name = writer_id_from_enum(deref(self.reader).GetWriterId())
+        if writer_name == 'UNKNOWN':
+            return deref(self.reader).GetWriterIdValue()
+        else:
+            return writer_name
+
+    def writer_version(self):
+        return writer_version_from_enum(deref(self.reader).GetWriterVersion())
+
+    def nstripe_statistics(self):
+        return deref(self.reader).GetNumberOfStripeStatistics()
+
+    def content_length(self):
+        return deref(self.reader).GetContentLength()
+
+    def stripe_statistics_length(self):
+        return deref(self.reader).GetStripeStatisticsLength()
+
+    def file_footer_length(self):
+        return deref(self.reader).GetFileFooterLength()
+
+    def file_postscript_length(self):
+        return deref(self.reader).GetFilePostscriptLength()
+
+    def file_length(self):
+        return deref(self.reader).GetFileLength()
+
+    def serialized_file_tail(self):
+        return deref(self.reader).GetSerializedFileTail()
+
+    def read_stripe(self, n, columns=None):
+        cdef:
+            shared_ptr[CRecordBatch] sp_record_batch
+            int64_t stripe
+            std_vector[c_string] c_names
+
+        stripe = n
+
+        if columns is None:
+            with nogil:
+                sp_record_batch = GetResultValue(
+                    deref(self.reader).ReadStripe(stripe)
+                )
+        else:
+            c_names = [tobytes(name) for name in columns]
+            with nogil:
+                sp_record_batch = GetResultValue(
+                    deref(self.reader).ReadStripe(stripe, c_names)
+                )
+
+        return pyarrow_wrap_batch(sp_record_batch)
+
+    def read(self, columns=None):
+        cdef:
+            shared_ptr[CTable] sp_table
+            std_vector[c_string] c_names
+
+        if columns is None:
+            with nogil:
+                sp_table = GetResultValue(deref(self.reader).Read())
+        else:
+            c_names = [tobytes(name) for name in columns]
+            with nogil:
+                sp_table = GetResultValue(deref(self.reader).Read(c_names))
+
+        return pyarrow_wrap_table(sp_table)
+
+
+cdef class ORCWriter(_Weakrefable):
+    cdef:
+        unique_ptr[ORCFileWriter] writer
+        shared_ptr[COutputStream] sink
+        c_bool own_sink
+
+    def open(self, object where, *,
+             file_version=None,
+             batch_size=None,
+             stripe_size=None,
+             compression=None,
+             compression_block_size=None,
+             compression_strategy=None,
+             row_index_stride=None,
+             padding_tolerance=None,
+             dictionary_key_size_threshold=None,
+             bloom_filter_columns=None,
+             bloom_filter_fpp=None):
+        cdef:
+            shared_ptr[WriteOptions] write_options
+            c_string c_where
+        try:
+            where = _stringify_path(where)
+        except TypeError:
+            get_writer(where, &self.sink)
+            self.own_sink = False
+        else:
+            c_where = tobytes(where)
+            with nogil:
+                self.sink = GetResultValue(FileOutputStream.Open(c_where))
+            self.own_sink = True
+
+        write_options = _create_write_options(
+            file_version=file_version,
+            batch_size=batch_size,
+            stripe_size=stripe_size,
+            compression=compression,
+            compression_block_size=compression_block_size,
+            compression_strategy=compression_strategy,
+            row_index_stride=row_index_stride,
+            padding_tolerance=padding_tolerance,
+            dictionary_key_size_threshold=dictionary_key_size_threshold,
+            bloom_filter_columns=bloom_filter_columns,
+            bloom_filter_fpp=bloom_filter_fpp
+        )
+
+        with nogil:
+            self.writer = move(GetResultValue(
+                ORCFileWriter.Open(self.sink.get(),
+                                   deref(write_options))))
+
+    def write(self, Table table):
+        cdef:
+            shared_ptr[CTable] sp_table
+        sp_table = pyarrow_unwrap_table(table)
+        with nogil:
+            check_status(deref(self.writer).Write(deref(sp_table)))
+
+    def close(self):
+        with nogil:
+            check_status(deref(self.writer).Close())
+            if self.own_sink:
+                check_status(deref(self.sink).Close())
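ORCReader and ORCWriter above are internal classes; they back the public
pyarrow.orc module. A short sketch of that public API, assuming an
ORC-enabled pyarrow build (the file name is hypothetical):

    import pyarrow as pa
    from pyarrow import orc

    table = pa.table({"id": [1, 2, 3], "val": ["a", "b", "c"]})

    # Keyword arguments such as compression and stripe_size are forwarded
    # down to _create_write_options above.
    orc.write_table(table, "data.orc", compression="zstd")

    f = orc.ORCFile("data.orc")
    print(f.nstripes, f.schema)          # stripe count and Arrow schema
    subset = f.read(columns=["id"])      # column pruning via ORCReader.read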
env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pyx
ADDED
@@ -0,0 +1,2195 @@
1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
# or more contributor license agreements. See the NOTICE file
|
3 |
+
# distributed with this work for additional information
|
4 |
+
# regarding copyright ownership. The ASF licenses this file
|
5 |
+
# to you under the Apache License, Version 2.0 (the
|
6 |
+
# "License"); you may not use this file except in compliance
|
7 |
+
# with the License. You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing,
|
12 |
+
# software distributed under the License is distributed on an
|
13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
# KIND, either express or implied. See the License for the
|
15 |
+
# specific language governing permissions and limitations
|
16 |
+
# under the License.
|
17 |
+
|
18 |
+
# cython: profile=False
|
19 |
+
# distutils: language = c++
|
20 |
+
|
21 |
+
from collections.abc import Sequence
|
22 |
+
from textwrap import indent
|
23 |
+
import warnings
|
24 |
+
|
25 |
+
from cython.operator cimport dereference as deref
|
26 |
+
from pyarrow.includes.common cimport *
|
27 |
+
from pyarrow.includes.libarrow cimport *
|
28 |
+
from pyarrow.includes.libarrow_python cimport *
|
29 |
+
from pyarrow.lib cimport (_Weakrefable, Buffer, Schema,
|
30 |
+
check_status,
|
31 |
+
MemoryPool, maybe_unbox_memory_pool,
|
32 |
+
Table, NativeFile,
|
33 |
+
pyarrow_wrap_chunked_array,
|
34 |
+
pyarrow_wrap_schema,
|
35 |
+
pyarrow_unwrap_schema,
|
36 |
+
pyarrow_wrap_table,
|
37 |
+
pyarrow_wrap_batch,
|
38 |
+
pyarrow_wrap_scalar,
|
39 |
+
NativeFile, get_reader, get_writer,
|
40 |
+
string_to_timeunit)
|
41 |
+
|
42 |
+
from pyarrow.lib import (ArrowException, NativeFile, BufferOutputStream,
|
43 |
+
_stringify_path,
|
44 |
+
tobytes, frombytes)
|
45 |
+
|
46 |
+
cimport cpython as cp
|
47 |
+
|
48 |
+
_DEFAULT_ROW_GROUP_SIZE = 1024*1024
|
49 |
+
_MAX_ROW_GROUP_SIZE = 64*1024*1024
|
50 |
+
|
51 |
+
cdef class Statistics(_Weakrefable):
|
52 |
+
"""Statistics for a single column in a single row group."""
|
53 |
+
|
54 |
+
def __cinit__(self):
|
55 |
+
pass
|
56 |
+
|
57 |
+
def __repr__(self):
|
58 |
+
return """{}
|
59 |
+
has_min_max: {}
|
60 |
+
min: {}
|
61 |
+
max: {}
|
62 |
+
null_count: {}
|
63 |
+
distinct_count: {}
|
64 |
+
num_values: {}
|
65 |
+
physical_type: {}
|
66 |
+
logical_type: {}
|
67 |
+
converted_type (legacy): {}""".format(object.__repr__(self),
|
68 |
+
self.has_min_max,
|
69 |
+
self.min,
|
70 |
+
self.max,
|
71 |
+
self.null_count,
|
72 |
+
self.distinct_count,
|
73 |
+
self.num_values,
|
74 |
+
self.physical_type,
|
75 |
+
str(self.logical_type),
|
76 |
+
self.converted_type)
|
77 |
+
|
78 |
+
def to_dict(self):
|
79 |
+
"""
|
80 |
+
Get dictionary representation of statistics.
|
81 |
+
|
82 |
+
Returns
|
83 |
+
-------
|
84 |
+
dict
|
85 |
+
Dictionary with a key for each attribute of this class.
|
86 |
+
"""
|
87 |
+
d = dict(
|
88 |
+
has_min_max=self.has_min_max,
|
89 |
+
min=self.min,
|
90 |
+
max=self.max,
|
91 |
+
null_count=self.null_count,
|
92 |
+
distinct_count=self.distinct_count,
|
93 |
+
num_values=self.num_values,
|
94 |
+
physical_type=self.physical_type
|
95 |
+
)
|
96 |
+
return d
|
97 |
+
|
98 |
+
def __eq__(self, other):
|
99 |
+
try:
|
100 |
+
return self.equals(other)
|
101 |
+
except TypeError:
|
102 |
+
return NotImplemented
|
103 |
+
|
104 |
+
def equals(self, Statistics other):
|
105 |
+
"""
|
106 |
+
Return whether the two column statistics objects are equal.
|
107 |
+
|
108 |
+
Parameters
|
109 |
+
----------
|
110 |
+
other : Statistics
|
111 |
+
Statistics to compare against.
|
112 |
+
|
113 |
+
Returns
|
114 |
+
-------
|
115 |
+
are_equal : bool
|
116 |
+
"""
|
117 |
+
return self.statistics.get().Equals(deref(other.statistics.get()))
|
118 |
+
|
119 |
+
@property
|
120 |
+
def has_min_max(self):
|
121 |
+
"""Whether min and max are present (bool)."""
|
122 |
+
return self.statistics.get().HasMinMax()
|
123 |
+
|
124 |
+
@property
|
125 |
+
def has_null_count(self):
|
126 |
+
"""Whether null count is present (bool)."""
|
127 |
+
return self.statistics.get().HasNullCount()
|
128 |
+
|
129 |
+
@property
|
130 |
+
def has_distinct_count(self):
|
131 |
+
"""Whether distinct count is preset (bool)."""
|
132 |
+
return self.statistics.get().HasDistinctCount()
|
133 |
+
|
134 |
+
@property
|
135 |
+
def min_raw(self):
|
136 |
+
"""Min value as physical type (bool, int, float, or bytes)."""
|
137 |
+
if self.has_min_max:
|
138 |
+
return _cast_statistic_raw_min(self.statistics.get())
|
139 |
+
else:
|
140 |
+
return None
|
141 |
+
|
142 |
+
@property
|
143 |
+
def max_raw(self):
|
144 |
+
"""Max value as physical type (bool, int, float, or bytes)."""
|
145 |
+
if self.has_min_max:
|
146 |
+
return _cast_statistic_raw_max(self.statistics.get())
|
147 |
+
else:
|
148 |
+
return None
|
149 |
+
|
150 |
+
@property
|
151 |
+
def min(self):
|
152 |
+
"""
|
153 |
+
Min value as logical type.
|
154 |
+
|
155 |
+
Returned as the Python equivalent of logical type, such as datetime.date
|
156 |
+
for dates and decimal.Decimal for decimals.
|
157 |
+
"""
|
158 |
+
if self.has_min_max:
|
159 |
+
min_scalar, _ = _cast_statistics(self.statistics.get())
|
160 |
+
return min_scalar.as_py()
|
161 |
+
else:
|
162 |
+
return None
|
163 |
+
|
164 |
+
@property
|
165 |
+
def max(self):
|
166 |
+
"""
|
167 |
+
Max value as logical type.
|
168 |
+
|
169 |
+
Returned as the Python equivalent of logical type, such as datetime.date
|
170 |
+
for dates and decimal.Decimal for decimals.
|
171 |
+
"""
|
172 |
+
if self.has_min_max:
|
173 |
+
_, max_scalar = _cast_statistics(self.statistics.get())
|
174 |
+
return max_scalar.as_py()
|
175 |
+
else:
|
176 |
+
return None
|
177 |
+
|
178 |
+
@property
|
179 |
+
def null_count(self):
|
180 |
+
"""Number of null values in chunk (int)."""
|
181 |
+
if self.has_null_count:
|
182 |
+
return self.statistics.get().null_count()
|
183 |
+
else:
|
184 |
+
return None
|
185 |
+
|
186 |
+
@property
|
187 |
+
def distinct_count(self):
|
188 |
+
"""Distinct number of values in chunk (int)."""
|
189 |
+
if self.has_distinct_count:
|
190 |
+
return self.statistics.get().distinct_count()
|
191 |
+
else:
|
192 |
+
return None
|
193 |
+
|
194 |
+
@property
|
195 |
+
def num_values(self):
|
196 |
+
"""Number of non-null values (int)."""
|
197 |
+
return self.statistics.get().num_values()
|
198 |
+
|
199 |
+
@property
|
200 |
+
def physical_type(self):
|
201 |
+
"""Physical type of column (str)."""
|
202 |
+
raw_physical_type = self.statistics.get().physical_type()
|
203 |
+
return physical_type_name_from_enum(raw_physical_type)
|
204 |
+
|
205 |
+
@property
|
206 |
+
def logical_type(self):
|
207 |
+
"""Logical type of column (:class:`ParquetLogicalType`)."""
|
208 |
+
return wrap_logical_type(self.statistics.get().descr().logical_type())
|
209 |
+
|
210 |
+
@property
|
211 |
+
def converted_type(self):
|
212 |
+
"""Legacy converted type (str or None)."""
|
213 |
+
raw_converted_type = self.statistics.get().descr().converted_type()
|
214 |
+
return converted_type_name_from_enum(raw_converted_type)
|
215 |
+
|
216 |
+
|
217 |
+
cdef class ParquetLogicalType(_Weakrefable):
|
218 |
+
"""Logical type of parquet type."""
|
219 |
+
cdef:
|
220 |
+
shared_ptr[const CParquetLogicalType] type
|
221 |
+
|
222 |
+
def __cinit__(self):
|
223 |
+
pass
|
224 |
+
|
225 |
+
cdef init(self, const shared_ptr[const CParquetLogicalType]& type):
|
226 |
+
self.type = type
|
227 |
+
|
228 |
+
def __repr__(self):
|
229 |
+
return "{}\n {}".format(object.__repr__(self), str(self))
|
230 |
+
|
231 |
+
def __str__(self):
|
232 |
+
return frombytes(self.type.get().ToString(), safe=True)
|
233 |
+
|
234 |
+
def to_json(self):
|
235 |
+
"""
|
236 |
+
Get a JSON string containing type and type parameters.
|
237 |
+
|
238 |
+
Returns
|
239 |
+
-------
|
240 |
+
json : str
|
241 |
+
JSON representation of type, with at least a field called 'Type'
|
242 |
+
which contains the type name. If the type is parameterized, such
|
243 |
+
as a decimal with scale and precision, will contain those as fields
|
244 |
+
as well.
|
245 |
+
"""
|
246 |
+
return frombytes(self.type.get().ToJSON())
|
247 |
+
|
248 |
+
@property
|
249 |
+
def type(self):
|
250 |
+
"""Name of the logical type (str)."""
|
251 |
+
return logical_type_name_from_enum(self.type.get().type())
|
252 |
+
|
253 |
+
|
254 |
+
cdef wrap_logical_type(const shared_ptr[const CParquetLogicalType]& type):
|
255 |
+
cdef ParquetLogicalType out = ParquetLogicalType()
|
256 |
+
out.init(type)
|
257 |
+
return out
|
258 |
+
|
259 |
+
|
260 |
+
cdef _cast_statistic_raw_min(CStatistics* statistics):
|
261 |
+
cdef ParquetType physical_type = statistics.physical_type()
|
262 |
+
cdef uint32_t type_length = statistics.descr().type_length()
|
263 |
+
if physical_type == ParquetType_BOOLEAN:
|
264 |
+
return (<CBoolStatistics*> statistics).min()
|
265 |
+
elif physical_type == ParquetType_INT32:
|
266 |
+
return (<CInt32Statistics*> statistics).min()
|
267 |
+
elif physical_type == ParquetType_INT64:
|
268 |
+
return (<CInt64Statistics*> statistics).min()
|
269 |
+
elif physical_type == ParquetType_FLOAT:
|
270 |
+
return (<CFloatStatistics*> statistics).min()
|
271 |
+
elif physical_type == ParquetType_DOUBLE:
|
272 |
+
return (<CDoubleStatistics*> statistics).min()
|
273 |
+
elif physical_type == ParquetType_BYTE_ARRAY:
|
274 |
+
return _box_byte_array((<CByteArrayStatistics*> statistics).min())
|
275 |
+
elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
|
276 |
+
return _box_flba((<CFLBAStatistics*> statistics).min(), type_length)
|
277 |
+
|
278 |
+
|
279 |
+
cdef _cast_statistic_raw_max(CStatistics* statistics):
|
280 |
+
cdef ParquetType physical_type = statistics.physical_type()
|
281 |
+
cdef uint32_t type_length = statistics.descr().type_length()
|
282 |
+
if physical_type == ParquetType_BOOLEAN:
|
283 |
+
return (<CBoolStatistics*> statistics).max()
|
284 |
+
elif physical_type == ParquetType_INT32:
|
285 |
+
return (<CInt32Statistics*> statistics).max()
|
286 |
+
elif physical_type == ParquetType_INT64:
|
287 |
+
return (<CInt64Statistics*> statistics).max()
|
288 |
+
elif physical_type == ParquetType_FLOAT:
|
289 |
+
return (<CFloatStatistics*> statistics).max()
|
290 |
+
elif physical_type == ParquetType_DOUBLE:
|
291 |
+
return (<CDoubleStatistics*> statistics).max()
|
292 |
+
elif physical_type == ParquetType_BYTE_ARRAY:
|
293 |
+
return _box_byte_array((<CByteArrayStatistics*> statistics).max())
|
294 |
+
elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
|
295 |
+
return _box_flba((<CFLBAStatistics*> statistics).max(), type_length)
|
296 |
+
|
297 |
+
|
298 |
+
cdef _cast_statistics(CStatistics* statistics):
|
299 |
+
cdef:
|
300 |
+
shared_ptr[CScalar] c_min
|
301 |
+
shared_ptr[CScalar] c_max
|
302 |
+
check_status(StatisticsAsScalars(statistics[0], &c_min, &c_max))
|
303 |
+
return (pyarrow_wrap_scalar(c_min), pyarrow_wrap_scalar(c_max))
|
304 |
+
|
305 |
+
|
306 |
+
cdef _box_byte_array(ParquetByteArray val):
|
307 |
+
return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> val.len)
|
308 |
+
|
309 |
+
|
310 |
+
cdef _box_flba(ParquetFLBA val, uint32_t len):
|
311 |
+
return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> len)
|
312 |
+
|
313 |
+
|
314 |
+
cdef class ColumnChunkMetaData(_Weakrefable):
|
315 |
+
"""Column metadata for a single row group."""
|
316 |
+
|
317 |
+
def __cinit__(self):
|
318 |
+
pass
|
319 |
+
|
320 |
+
def __repr__(self):
|
321 |
+
statistics = indent(repr(self.statistics), 4 * ' ')
|
322 |
+
return """{0}
|
323 |
+
file_offset: {1}
|
324 |
+
file_path: {2}
|
325 |
+
physical_type: {3}
|
326 |
+
num_values: {4}
|
327 |
+
path_in_schema: {5}
|
328 |
+
is_stats_set: {6}
|
329 |
+
statistics:
|
330 |
+
{7}
|
331 |
+
compression: {8}
|
332 |
+
encodings: {9}
|
333 |
+
has_dictionary_page: {10}
|
334 |
+
dictionary_page_offset: {11}
|
335 |
+
data_page_offset: {12}
|
336 |
+
total_compressed_size: {13}
|
337 |
+
total_uncompressed_size: {14}""".format(object.__repr__(self),
|
338 |
+
self.file_offset,
|
339 |
+
self.file_path,
|
340 |
+
self.physical_type,
|
341 |
+
self.num_values,
|
342 |
+
self.path_in_schema,
|
343 |
+
self.is_stats_set,
|
344 |
+
statistics,
|
345 |
+
self.compression,
|
346 |
+
self.encodings,
|
347 |
+
self.has_dictionary_page,
|
348 |
+
self.dictionary_page_offset,
|
349 |
+
self.data_page_offset,
|
350 |
+
self.total_compressed_size,
|
351 |
+
self.total_uncompressed_size)
|
352 |
+
|
353 |
+
def to_dict(self):
|
354 |
+
"""
|
355 |
+
Get dictionary representation of the column chunk metadata.
|
356 |
+
|
357 |
+
Returns
|
358 |
+
-------
|
359 |
+
dict
|
360 |
+
Dictionary with a key for each attribute of this class.
|
361 |
+
"""
|
362 |
+
statistics = self.statistics.to_dict() if self.is_stats_set else None
|
363 |
+
d = dict(
|
364 |
+
file_offset=self.file_offset,
|
365 |
+
file_path=self.file_path,
|
366 |
+
physical_type=self.physical_type,
|
367 |
+
num_values=self.num_values,
|
368 |
+
path_in_schema=self.path_in_schema,
|
369 |
+
is_stats_set=self.is_stats_set,
|
370 |
+
statistics=statistics,
|
371 |
+
compression=self.compression,
|
372 |
+
encodings=self.encodings,
|
373 |
+
has_dictionary_page=self.has_dictionary_page,
|
374 |
+
dictionary_page_offset=self.dictionary_page_offset,
|
375 |
+
data_page_offset=self.data_page_offset,
|
376 |
+
total_compressed_size=self.total_compressed_size,
|
377 |
+
total_uncompressed_size=self.total_uncompressed_size
|
378 |
+
)
|
379 |
+
return d
|
380 |
+
|
381 |
+
def __eq__(self, other):
|
382 |
+
try:
|
383 |
+
return self.equals(other)
|
384 |
+
except TypeError:
|
385 |
+
return NotImplemented
|
386 |
+
|
387 |
+
def equals(self, ColumnChunkMetaData other):
|
388 |
+
"""
|
389 |
+
Return whether the two column chunk metadata objects are equal.
|
390 |
+
|
391 |
+
Parameters
|
392 |
+
----------
|
393 |
+
other : ColumnChunkMetaData
|
394 |
+
Metadata to compare against.
|
395 |
+
|
396 |
+
Returns
|
397 |
+
-------
|
398 |
+
are_equal : bool
|
399 |
+
"""
|
400 |
+
return self.metadata.Equals(deref(other.metadata))
|
401 |
+
|
402 |
+
@property
|
403 |
+
def file_offset(self):
|
404 |
+
"""Offset into file where column chunk is located (int)."""
|
405 |
+
return self.metadata.file_offset()
|
406 |
+
|
407 |
+
@property
|
408 |
+
def file_path(self):
|
409 |
+
"""Optional file path if set (str or None)."""
|
410 |
+
return frombytes(self.metadata.file_path())
|
411 |
+
|
412 |
+
@property
|
413 |
+
def physical_type(self):
|
414 |
+
"""Physical type of column (str)."""
|
415 |
+
return physical_type_name_from_enum(self.metadata.type())
|
416 |
+
|
417 |
+
@property
|
418 |
+
def num_values(self):
|
419 |
+
"""Total number of values (int)."""
|
420 |
+
return self.metadata.num_values()
|
421 |
+
|
422 |
+
@property
|
423 |
+
def path_in_schema(self):
|
424 |
+
"""Nested path to field, separated by periods (str)."""
|
425 |
+
path = self.metadata.path_in_schema().get().ToDotString()
|
426 |
+
return frombytes(path)
|
427 |
+
|
428 |
+
@property
|
429 |
+
def is_stats_set(self):
|
430 |
+
"""Whether or not statistics are present in metadata (bool)."""
|
431 |
+
return self.metadata.is_stats_set()
|
432 |
+
|
433 |
+
@property
|
434 |
+
def statistics(self):
|
435 |
+
"""Statistics for column chunk (:class:`Statistics`)."""
|
436 |
+
if not self.metadata.is_stats_set():
|
437 |
+
return None
|
438 |
+
statistics = Statistics()
|
439 |
+
statistics.init(self.metadata.statistics(), self)
|
440 |
+
return statistics
|
441 |
+
|
442 |
+
@property
|
443 |
+
def compression(self):
|
444 |
+
"""
|
445 |
+
Type of compression used for column (str).
|
446 |
+
|
447 |
+
One of 'UNCOMPRESSED', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD',
|
448 |
+
or 'UNKNOWN'.
|
449 |
+
"""
|
450 |
+
return compression_name_from_enum(self.metadata.compression())
|
451 |
+
|
452 |
+
@property
|
453 |
+
def encodings(self):
|
454 |
+
"""
|
455 |
+
Encodings used for column (tuple of str).
|
456 |
+
|
457 |
+
One of 'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', 'DELTA_BINARY_PACKED',
|
458 |
+
'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'.
|
459 |
+
"""
|
460 |
+
return tuple(map(encoding_name_from_enum, self.metadata.encodings()))
|
461 |
+
|
462 |
+
@property
|
463 |
+
def has_dictionary_page(self):
|
464 |
+
"""Whether there is dictionary data present in the column chunk (bool)."""
|
465 |
+
return bool(self.metadata.has_dictionary_page())
|
466 |
+
|
467 |
+
@property
|
468 |
+
def dictionary_page_offset(self):
|
469 |
+
"""Offset of dictionary page relative to column chunk offset (int)."""
|
470 |
+
if self.has_dictionary_page:
|
471 |
+
return self.metadata.dictionary_page_offset()
|
472 |
+
else:
|
473 |
+
return None
|
474 |
+
|
475 |
+
@property
|
476 |
+
def data_page_offset(self):
|
477 |
+
"""Offset of data page relative to column chunk offset (int)."""
|
478 |
+
return self.metadata.data_page_offset()
|
479 |
+
|
480 |
+
@property
|
481 |
+
def has_index_page(self):
|
482 |
+
"""Not yet supported."""
|
483 |
+
raise NotImplementedError('not supported in parquet-cpp')
|
484 |
+
|
485 |
+
@property
|
486 |
+
def index_page_offset(self):
|
487 |
+
"""Not yet supported."""
|
488 |
+
raise NotImplementedError("parquet-cpp doesn't return valid values")
|
489 |
+
|
490 |
+
@property
|
491 |
+
def total_compressed_size(self):
|
492 |
+
"""Compressed size in bytes (int)."""
|
493 |
+
return self.metadata.total_compressed_size()
|
494 |
+
|
495 |
+
@property
|
496 |
+
def total_uncompressed_size(self):
|
497 |
+
"""Uncompressed size in bytes (int)."""
|
498 |
+
return self.metadata.total_uncompressed_size()
|
499 |
+
|
500 |
+
@property
|
501 |
+
def has_offset_index(self):
|
502 |
+
"""Whether the column chunk has an offset index"""
|
503 |
+
return self.metadata.GetOffsetIndexLocation().has_value()
|
504 |
+
|
505 |
+
@property
|
506 |
+
def has_column_index(self):
|
507 |
+
"""Whether the column chunk has a column index"""
|
508 |
+
return self.metadata.GetColumnIndexLocation().has_value()
|
509 |
+
|
510 |
+
|
511 |
+
cdef class SortingColumn:
|
512 |
+
"""
|
513 |
+
Sorting specification for a single column.
|
514 |
+
|
515 |
+
Returned by :meth:`RowGroupMetaData.sorting_columns` and used in
|
516 |
+
:class:`ParquetWriter` to specify the sort order of the data.
|
517 |
+
|
518 |
+
Parameters
|
519 |
+
----------
|
520 |
+
column_index : int
|
521 |
+
Index of column that data is sorted by.
|
522 |
+
descending : bool, default False
|
523 |
+
Whether column is sorted in descending order.
|
524 |
+
nulls_first : bool, default False
|
525 |
+
Whether null values appear before valid values.
|
526 |
+
|
527 |
+
Notes
|
528 |
+
-----
|
529 |
+
|
530 |
+
Column indices are zero-based, refer only to leaf fields, and are in
|
531 |
+
depth-first order. This may make the column indices for nested schemas
|
532 |
+
different from what you expect. In most cases, it will be easier to
|
533 |
+
specify the sort order using column names instead of column indices
|
534 |
+
and converting using the ``from_ordering`` method.
|
535 |
+
|
536 |
+
Examples
|
537 |
+
--------
|
538 |
+
|
539 |
+
In other APIs, sort order is specified by names, such as:
|
540 |
+
|
541 |
+
>>> sort_order = [('id', 'ascending'), ('timestamp', 'descending')]
|
542 |
+
|
543 |
+
For Parquet, the column index must be used instead:
|
544 |
+
|
545 |
+
>>> import pyarrow.parquet as pq
|
546 |
+
>>> [pq.SortingColumn(0), pq.SortingColumn(1, descending=True)]
|
547 |
+
[SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)]
|
548 |
+
|
549 |
+
Convert the sort_order into the list of sorting columns with
|
550 |
+
``from_ordering`` (note that the schema must be provided as well):
|
551 |
+
|
552 |
+
>>> import pyarrow as pa
|
553 |
+
>>> schema = pa.schema([('id', pa.int64()), ('timestamp', pa.timestamp('ms'))])
|
554 |
+
>>> sorting_columns = pq.SortingColumn.from_ordering(schema, sort_order)
|
555 |
+
>>> sorting_columns
|
556 |
+
(SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False))
|
557 |
+
|
558 |
+
Convert back to the sort order with ``to_ordering``:
|
559 |
+
|
560 |
+
>>> pq.SortingColumn.to_ordering(schema, sorting_columns)
|
561 |
+
((('id', 'ascending'), ('timestamp', 'descending')), 'at_end')
|
562 |
+
|
563 |
+
See Also
|
564 |
+
--------
|
565 |
+
RowGroupMetaData.sorting_columns
|
566 |
+
"""
|
567 |
+
cdef int column_index
|
568 |
+
cdef c_bool descending
|
569 |
+
cdef c_bool nulls_first
|
570 |
+
|
571 |
+
def __init__(self, int column_index, c_bool descending=False, c_bool nulls_first=False):
|
572 |
+
self.column_index = column_index
|
573 |
+
self.descending = descending
|
574 |
+
self.nulls_first = nulls_first
|
575 |
+
|
576 |
+
@classmethod
|
577 |
+
def from_ordering(cls, Schema schema, sort_keys, null_placement='at_end'):
|
578 |
+
"""
|
579 |
+
Create a tuple of SortingColumn objects from the same arguments as
|
580 |
+
:class:`pyarrow.compute.SortOptions`.
|
581 |
+
|
582 |
+
Parameters
|
583 |
+
----------
|
584 |
+
schema : Schema
|
585 |
+
Schema of the input data.
|
586 |
+
sort_keys : Sequence of (name, order) tuples
|
587 |
+
Names of field/column keys (str) to sort the input on,
|
588 |
+
along with the order each field/column is sorted in.
|
589 |
+
Accepted values for `order` are "ascending", "descending".
|
590 |
+
null_placement : {'at_start', 'at_end'}, default 'at_end'
|
591 |
+
Where null values should appear in the sort order.
|
592 |
+
|
593 |
+
Returns
|
594 |
+
-------
|
595 |
+
sorting_columns : tuple of SortingColumn
|
596 |
+
"""
|
597 |
+
if null_placement == 'at_start':
|
598 |
+
nulls_first = True
|
599 |
+
elif null_placement == 'at_end':
|
600 |
+
nulls_first = False
|
601 |
+
else:
|
602 |
+
raise ValueError('null_placement must be "at_start" or "at_end"')
|
603 |
+
|
604 |
+
col_map = _name_to_index_map(schema)
|
605 |
+
|
606 |
+
sorting_columns = []
|
607 |
+
|
608 |
+
for sort_key in sort_keys:
|
609 |
+
if isinstance(sort_key, str):
|
610 |
+
name = sort_key
|
611 |
+
descending = False
|
612 |
+
elif (isinstance(sort_key, tuple) and len(sort_key) == 2 and
|
613 |
+
isinstance(sort_key[0], str) and
|
614 |
+
isinstance(sort_key[1], str)):
|
615 |
+
name, descending = sort_key
|
616 |
+
if descending == "descending":
|
617 |
+
descending = True
|
618 |
+
elif descending == "ascending":
|
619 |
+
descending = False
|
620 |
+
else:
|
621 |
+
raise ValueError("Invalid sort key direction: {0}"
|
622 |
+
.format(descending))
|
623 |
+
else:
|
624 |
+
raise ValueError("Invalid sort key: {0}".format(sort_key))
|
625 |
+
|
626 |
+
try:
|
627 |
+
column_index = col_map[name]
|
628 |
+
except KeyError:
|
629 |
+
raise ValueError("Sort key name '{0}' not found in schema:\n{1}"
|
630 |
+
.format(name, schema))
|
631 |
+
|
632 |
+
sorting_columns.append(
|
633 |
+
cls(column_index, descending=descending, nulls_first=nulls_first)
|
634 |
+
)
|
635 |
+
|
636 |
+
return tuple(sorting_columns)
|
637 |
+
|
638 |
+
@staticmethod
|
639 |
+
def to_ordering(Schema schema, sorting_columns):
|
640 |
+
"""
|
641 |
+
Convert a tuple of SortingColumn objects to the same format as
|
642 |
+
:class:`pyarrow.compute.SortOptions`.
|
643 |
+
|
644 |
+
Parameters
|
645 |
+
----------
|
646 |
+
schema : Schema
|
647 |
+
Schema of the input data.
|
648 |
+
sorting_columns : tuple of SortingColumn
|
649 |
+
Columns to sort the input on.
|
650 |
+
|
651 |
+
Returns
|
652 |
+
-------
|
653 |
+
sort_keys : tuple of (name, order) tuples
|
654 |
+
null_placement : {'at_start', 'at_end'}
|
655 |
+
"""
|
656 |
+
col_map = {i: name for name, i in _name_to_index_map(schema).items()}
|
657 |
+
|
658 |
+
sort_keys = []
|
659 |
+
nulls_first = None
|
660 |
+
|
661 |
+
for sorting_column in sorting_columns:
|
662 |
+
name = col_map[sorting_column.column_index]
|
663 |
+
if sorting_column.descending:
|
664 |
+
order = "descending"
|
665 |
+
else:
|
666 |
+
order = "ascending"
|
667 |
+
sort_keys.append((name, order))
|
668 |
+
if nulls_first is None:
|
669 |
+
nulls_first = sorting_column.nulls_first
|
670 |
+
elif nulls_first != sorting_column.nulls_first:
|
671 |
+
raise ValueError("Sorting columns have inconsistent null placement")
|
672 |
+
|
673 |
+
if nulls_first:
|
674 |
+
null_placement = "at_start"
|
675 |
+
else:
|
676 |
+
null_placement = "at_end"
|
677 |
+
|
678 |
+
return tuple(sort_keys), null_placement
|
679 |
+
|
680 |
+
def __repr__(self):
|
681 |
+
return """{}(column_index={}, descending={}, nulls_first={})""".format(
|
682 |
+
self.__class__.__name__,
|
683 |
+
self.column_index, self.descending, self.nulls_first)
|
684 |
+
|
685 |
+
def __eq__(self, SortingColumn other):
|
686 |
+
return (self.column_index == other.column_index and
|
687 |
+
self.descending == other.descending and
|
688 |
+
self.nulls_first == other.nulls_first)
|
689 |
+
|
690 |
+
def __hash__(self):
|
691 |
+
return hash((self.column_index, self.descending, self.nulls_first))
|
692 |
+
|
693 |
+
@property
|
694 |
+
def column_index(self):
|
695 |
+
""""Index of column data is sorted by (int)."""
|
696 |
+
return self.column_index
|
697 |
+
|
698 |
+
@property
|
699 |
+
def descending(self):
|
700 |
+
"""Whether column is sorted in descending order (bool)."""
|
701 |
+
return self.descending
|
702 |
+
|
703 |
+
@property
|
704 |
+
def nulls_first(self):
|
705 |
+
"""Whether null values appear before valid values (bool)."""
|
706 |
+
return self.nulls_first
|
707 |
+
|
708 |
+
|
709 |
+
cdef class RowGroupMetaData(_Weakrefable):
|
710 |
+
"""Metadata for a single row group."""
|
711 |
+
|
712 |
+
def __cinit__(self, FileMetaData parent, int index):
|
713 |
+
if index < 0 or index >= parent.num_row_groups:
|
714 |
+
raise IndexError('{0} out of bounds'.format(index))
|
715 |
+
self.up_metadata = parent._metadata.RowGroup(index)
|
716 |
+
self.metadata = self.up_metadata.get()
|
717 |
+
self.parent = parent
|
718 |
+
self.index = index
|
719 |
+
|
720 |
+
def __reduce__(self):
|
721 |
+
return RowGroupMetaData, (self.parent, self.index)
|
722 |
+
|
723 |
+
def __eq__(self, other):
|
724 |
+
try:
|
725 |
+
return self.equals(other)
|
726 |
+
except TypeError:
|
727 |
+
return NotImplemented
|
728 |
+
|
729 |
+
def equals(self, RowGroupMetaData other):
|
730 |
+
"""
|
731 |
+
Return whether the two row group metadata objects are equal.
|
732 |
+
|
733 |
+
Parameters
|
734 |
+
----------
|
735 |
+
other : RowGroupMetaData
|
736 |
+
Metadata to compare against.
|
737 |
+
|
738 |
+
Returns
|
739 |
+
-------
|
740 |
+
are_equal : bool
|
741 |
+
"""
|
742 |
+
return self.metadata.Equals(deref(other.metadata))
|
743 |
+
|
744 |
+
def column(self, int i):
|
745 |
+
"""
|
746 |
+
Get column metadata at given index.
|
747 |
+
|
748 |
+
Parameters
|
749 |
+
----------
|
750 |
+
i : int
|
751 |
+
Index of column to get metadata for.
|
752 |
+
|
753 |
+
Returns
|
754 |
+
-------
|
755 |
+
ColumnChunkMetaData
|
756 |
+
Metadata for column within this chunk.
|
757 |
+
"""
|
758 |
+
if i < 0 or i >= self.num_columns:
|
759 |
+
raise IndexError('{0} out of bounds'.format(i))
|
760 |
+
chunk = ColumnChunkMetaData()
|
761 |
+
chunk.init(self, i)
|
762 |
+
return chunk
|
763 |
+
|
764 |
+
def __repr__(self):
|
765 |
+
return """{0}
|
766 |
+
num_columns: {1}
|
767 |
+
num_rows: {2}
|
768 |
+
total_byte_size: {3}
|
769 |
+
sorting_columns: {4}""".format(object.__repr__(self),
|
770 |
+
self.num_columns,
|
771 |
+
self.num_rows,
|
772 |
+
self.total_byte_size,
|
773 |
+
self.sorting_columns)
|
774 |
+
|
775 |
+
def to_dict(self):
|
776 |
+
"""
|
777 |
+
Get dictionary representation of the row group metadata.
|
778 |
+
|
779 |
+
Returns
|
780 |
+
-------
|
781 |
+
dict
|
782 |
+
Dictionary with a key for each attribute of this class.
|
783 |
+
"""
|
784 |
+
columns = []
|
785 |
+
d = dict(
|
786 |
+
num_columns=self.num_columns,
|
787 |
+
num_rows=self.num_rows,
|
788 |
+
total_byte_size=self.total_byte_size,
|
789 |
+
columns=columns,
|
790 |
+
sorting_columns=[col.to_dict() for col in self.sorting_columns]
|
791 |
+
)
|
792 |
+
for i in range(self.num_columns):
|
793 |
+
columns.append(self.column(i).to_dict())
|
794 |
+
return d
|
795 |
+
|
796 |
+
@property
|
797 |
+
def num_columns(self):
|
798 |
+
"""Number of columns in this row group (int)."""
|
799 |
+
return self.metadata.num_columns()
|
800 |
+
|
801 |
+
@property
|
802 |
+
def num_rows(self):
|
803 |
+
"""Number of rows in this row group (int)."""
|
804 |
+
return self.metadata.num_rows()
|
805 |
+
|
806 |
+
@property
|
807 |
+
def total_byte_size(self):
|
808 |
+
"""Total byte size of all the uncompressed column data in this row group (int)."""
|
809 |
+
return self.metadata.total_byte_size()
|
810 |
+
|
811 |
+
@property
|
812 |
+
def sorting_columns(self):
|
813 |
+
"""Columns the row group is sorted by (tuple of :class:`SortingColumn`))."""
|
814 |
+
out = []
|
815 |
+
cdef vector[CSortingColumn] sorting_columns = self.metadata.sorting_columns()
|
816 |
+
for sorting_col in sorting_columns:
|
817 |
+
out.append(SortingColumn(
|
818 |
+
sorting_col.column_idx,
|
819 |
+
sorting_col.descending,
|
820 |
+
sorting_col.nulls_first
|
821 |
+
))
|
822 |
+
return tuple(out)
|
823 |
+
|
824 |
+
|
825 |
+
def _reconstruct_filemetadata(Buffer serialized):
    cdef:
        FileMetaData metadata = FileMetaData.__new__(FileMetaData)
        CBuffer *buffer = serialized.buffer.get()
        uint32_t metadata_len = <uint32_t>buffer.size()

    metadata.init(CFileMetaData_Make(buffer.data(), &metadata_len))

    return metadata


cdef class FileMetaData(_Weakrefable):
    """Parquet metadata for a single file."""

    def __cinit__(self):
        pass

    def __reduce__(self):
        cdef:
            NativeFile sink = BufferOutputStream()
            COutputStream* c_sink = sink.get_output_stream().get()
        with nogil:
            self._metadata.WriteTo(c_sink)

        cdef Buffer buffer = sink.getvalue()
        return _reconstruct_filemetadata, (buffer,)

    def __repr__(self):
        return """{0}
created_by: {1}
num_columns: {2}
num_rows: {3}
num_row_groups: {4}
format_version: {5}
serialized_size: {6}""".format(object.__repr__(self),
                               self.created_by, self.num_columns,
                               self.num_rows, self.num_row_groups,
                               self.format_version,
                               self.serialized_size)

    def to_dict(self):
        """
        Get dictionary representation of the file metadata.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        row_groups = []
        d = dict(
            created_by=self.created_by,
            num_columns=self.num_columns,
            num_rows=self.num_rows,
            num_row_groups=self.num_row_groups,
            row_groups=row_groups,
            format_version=self.format_version,
            serialized_size=self.serialized_size
        )
        for i in range(self.num_row_groups):
            row_groups.append(self.row_group(i).to_dict())
        return d

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, FileMetaData other not None):
        """
        Return whether the two file metadata objects are equal.

        Parameters
        ----------
        other : FileMetaData
            Metadata to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self._metadata.Equals(deref(other._metadata))

    @property
    def schema(self):
        """Schema of the file (:class:`ParquetSchema`)."""
        if self._schema is None:
            self._schema = ParquetSchema(self)
        return self._schema

    @property
    def serialized_size(self):
        """Size of the original thrift encoded metadata footer (int)."""
        return self._metadata.size()

    @property
    def num_columns(self):
        """Number of columns in file (int)."""
        return self._metadata.num_columns()

    @property
    def num_rows(self):
        """Total number of rows in file (int)."""
        return self._metadata.num_rows()

    @property
    def num_row_groups(self):
        """Number of row groups in file (int)."""
        return self._metadata.num_row_groups()

    @property
    def format_version(self):
        """
        Parquet format version used in file (str, such as '1.0', '2.4').

        If version is missing or unparsable, will default to assuming '2.6'.
        """
        cdef ParquetVersion version = self._metadata.version()
        if version == ParquetVersion_V1:
            return '1.0'
        elif version == ParquetVersion_V2_0:
            return 'pseudo-2.0'
        elif version == ParquetVersion_V2_4:
            return '2.4'
        elif version == ParquetVersion_V2_6:
            return '2.6'
        else:
            warnings.warn('Unrecognized file version, assuming 2.6: {}'
                          .format(version))
            return '2.6'

    @property
    def created_by(self):
        """
        String describing source of the parquet file (str).

        This typically includes library name and version number. For example, Arrow 7.0's
        writer returns 'parquet-cpp-arrow version 7.0.0'.
        """
        return frombytes(self._metadata.created_by())

    @property
    def metadata(self):
        """Additional metadata as key value pairs (dict[bytes, bytes])."""
        cdef:
            unordered_map[c_string, c_string] metadata
            const CKeyValueMetadata* underlying_metadata
        underlying_metadata = self._metadata.key_value_metadata().get()
        if underlying_metadata != NULL:
            underlying_metadata.ToUnorderedMap(&metadata)
            return metadata
        else:
            return None

    def row_group(self, int i):
        """
        Get metadata for row group at index i.

        Parameters
        ----------
        i : int
            Row group index to get.

        Returns
        -------
        row_group_metadata : RowGroupMetaData
        """
        return RowGroupMetaData(self, i)

    def set_file_path(self, path):
        """
        Set ColumnChunk file paths to the given value.

        This method modifies the ``file_path`` field of each ColumnChunk
        in the FileMetaData to be a particular value.

        Parameters
        ----------
        path : str
            The file path to set on all ColumnChunks.
        """
        cdef:
            c_string c_path = tobytes(path)
        self._metadata.set_file_path(c_path)

    def append_row_groups(self, FileMetaData other):
        """
        Append row groups from other FileMetaData object.

        Parameters
        ----------
        other : FileMetaData
            Other metadata to append row groups from.
        """
        cdef shared_ptr[CFileMetaData] c_metadata

        c_metadata = other.sp_metadata
        self._metadata.AppendRowGroups(deref(c_metadata))

    def write_metadata_file(self, where):
        """
        Write the metadata to a metadata-only Parquet file.

        Parameters
        ----------
        where : path or file-like object
            Where to write the metadata.  Should be a writable path on
            the local filesystem, or a writable file-like object.
        """
        cdef:
            shared_ptr[COutputStream] sink
            c_string c_where

        try:
            where = _stringify_path(where)
        except TypeError:
            get_writer(where, &sink)
        else:
            c_where = tobytes(where)
            with nogil:
                sink = GetResultValue(FileOutputStream.Open(c_where))

        with nogil:
            check_status(
                WriteMetaDataFile(deref(self._metadata), sink.get()))


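# A minimal sketch of the set_file_path / append_row_groups /
# write_metadata_file workflow above, combining per-file footers into a
# dataset-level "_metadata" file; the part file names are hypothetical:
#
#   import pyarrow.parquet as pq
#   md = pq.read_metadata("part-0.parquet")
#   md.set_file_path("part-0.parquet")
#   other = pq.read_metadata("part-1.parquet")
#   other.set_file_path("part-1.parquet")
#   md.append_row_groups(other)
#   md.write_metadata_file("_metadata")
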
cdef class ParquetSchema(_Weakrefable):
    """A Parquet schema."""

    def __cinit__(self, FileMetaData container):
        self.parent = container
        self.schema = container._metadata.schema()

    def __repr__(self):
        return "{0}\n{1}".format(
            object.__repr__(self),
            frombytes(self.schema.ToString(), safe=True))

    def __reduce__(self):
        return ParquetSchema, (self.parent,)

    def __len__(self):
        return self.schema.num_columns()

    def __getitem__(self, i):
        return self.column(i)

    @property
    def names(self):
        """Name of each field (list of str)."""
        return [self[i].name for i in range(len(self))]

    def to_arrow_schema(self):
        """
        Convert Parquet schema to effective Arrow schema.

        Returns
        -------
        schema : Schema
        """
        cdef shared_ptr[CSchema] sp_arrow_schema

        with nogil:
            check_status(FromParquetSchema(
                self.schema, default_arrow_reader_properties(),
                self.parent._metadata.key_value_metadata(),
                &sp_arrow_schema))

        return pyarrow_wrap_schema(sp_arrow_schema)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, ParquetSchema other):
        """
        Return whether the two schemas are equal.

        Parameters
        ----------
        other : ParquetSchema
            Schema to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.schema.Equals(deref(other.schema))

    def column(self, i):
        """
        Return the schema for a single column.

        Parameters
        ----------
        i : int
            Index of column in schema.

        Returns
        -------
        column_schema : ColumnSchema
        """
        if i < 0 or i >= len(self):
            raise IndexError('{0} out of bounds'.format(i))

        return ColumnSchema(self, i)


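# A short sketch of ParquetSchema access, assuming the hypothetical
# "example.parquet" from above:
#
#   meta = pq.read_metadata("example.parquet")
#   pschema = meta.schema                      # ParquetSchema
#   print(pschema.names)
#   arrow_schema = pschema.to_arrow_schema()   # pyarrow.Schema
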
cdef class ColumnSchema(_Weakrefable):
    """Schema for a single column."""
    cdef:
        int index
        ParquetSchema parent
        const ColumnDescriptor* descr

    def __cinit__(self, ParquetSchema schema, int index):
        self.parent = schema
        self.index = index  # for pickling support
        self.descr = schema.schema.Column(index)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def __reduce__(self):
        return ColumnSchema, (self.parent, self.index)

    def equals(self, ColumnSchema other):
        """
        Return whether the two column schemas are equal.

        Parameters
        ----------
        other : ColumnSchema
            Schema to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.descr.Equals(deref(other.descr))

    def __repr__(self):
        physical_type = self.physical_type
        converted_type = self.converted_type
        if converted_type == 'DECIMAL':
            converted_type = 'DECIMAL({0}, {1})'.format(self.precision,
                                                        self.scale)
        elif physical_type == 'FIXED_LEN_BYTE_ARRAY':
            converted_type = ('FIXED_LEN_BYTE_ARRAY(length={0})'
                              .format(self.length))

        return """<ParquetColumnSchema>
  name: {0}
  path: {1}
  max_definition_level: {2}
  max_repetition_level: {3}
  physical_type: {4}
  logical_type: {5}
  converted_type (legacy): {6}""".format(self.name, self.path,
                                         self.max_definition_level,
                                         self.max_repetition_level,
                                         physical_type,
                                         str(self.logical_type),
                                         converted_type)

    @property
    def name(self):
        """Name of field (str)."""
        return frombytes(self.descr.name())

    @property
    def path(self):
        """Nested path to field, separated by periods (str)."""
        return frombytes(self.descr.path().get().ToDotString())

    @property
    def max_definition_level(self):
        """Maximum definition level (int)."""
        return self.descr.max_definition_level()

    @property
    def max_repetition_level(self):
        """Maximum repetition level (int)."""
        return self.descr.max_repetition_level()

    @property
    def physical_type(self):
        """Name of physical type (str)."""
        return physical_type_name_from_enum(self.descr.physical_type())

    @property
    def logical_type(self):
        """Logical type of column (:class:`ParquetLogicalType`)."""
        return wrap_logical_type(self.descr.logical_type())

    @property
    def converted_type(self):
        """Legacy converted type (str or None)."""
        return converted_type_name_from_enum(self.descr.converted_type())

    # FIXED_LEN_BYTE_ARRAY attribute
    @property
    def length(self):
        """Array length if fixed length byte array type, None otherwise (int or None)."""
        return self.descr.type_length()

    # Decimal attributes
    @property
    def precision(self):
        """Precision if decimal type, None otherwise (int or None)."""
        return self.descr.type_precision()

    @property
    def scale(self):
        """Scale if decimal type, None otherwise (int or None)."""
        return self.descr.type_scale()


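# Per-column schema details hang off ParquetSchema; a sketch with the same
# hypothetical file:
#
#   col = meta.schema.column(0)            # ColumnSchema
#   print(col.name, col.physical_type, col.logical_type)
#   print(col.max_definition_level, col.max_repetition_level)
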
cdef physical_type_name_from_enum(ParquetType type_):
    return {
        ParquetType_BOOLEAN: 'BOOLEAN',
        ParquetType_INT32: 'INT32',
        ParquetType_INT64: 'INT64',
        ParquetType_INT96: 'INT96',
        ParquetType_FLOAT: 'FLOAT',
        ParquetType_DOUBLE: 'DOUBLE',
        ParquetType_BYTE_ARRAY: 'BYTE_ARRAY',
        ParquetType_FIXED_LEN_BYTE_ARRAY: 'FIXED_LEN_BYTE_ARRAY',
    }.get(type_, 'UNKNOWN')


cdef logical_type_name_from_enum(ParquetLogicalTypeId type_):
    return {
        ParquetLogicalType_UNDEFINED: 'UNDEFINED',
        ParquetLogicalType_STRING: 'STRING',
        ParquetLogicalType_MAP: 'MAP',
        ParquetLogicalType_LIST: 'LIST',
        ParquetLogicalType_ENUM: 'ENUM',
        ParquetLogicalType_DECIMAL: 'DECIMAL',
        ParquetLogicalType_DATE: 'DATE',
        ParquetLogicalType_TIME: 'TIME',
        ParquetLogicalType_TIMESTAMP: 'TIMESTAMP',
        ParquetLogicalType_INT: 'INT',
        ParquetLogicalType_JSON: 'JSON',
        ParquetLogicalType_BSON: 'BSON',
        ParquetLogicalType_UUID: 'UUID',
        ParquetLogicalType_NONE: 'NONE',
    }.get(type_, 'UNKNOWN')


cdef converted_type_name_from_enum(ParquetConvertedType type_):
    return {
        ParquetConvertedType_NONE: 'NONE',
        ParquetConvertedType_UTF8: 'UTF8',
        ParquetConvertedType_MAP: 'MAP',
        ParquetConvertedType_MAP_KEY_VALUE: 'MAP_KEY_VALUE',
        ParquetConvertedType_LIST: 'LIST',
        ParquetConvertedType_ENUM: 'ENUM',
        ParquetConvertedType_DECIMAL: 'DECIMAL',
        ParquetConvertedType_DATE: 'DATE',
        ParquetConvertedType_TIME_MILLIS: 'TIME_MILLIS',
        ParquetConvertedType_TIME_MICROS: 'TIME_MICROS',
        ParquetConvertedType_TIMESTAMP_MILLIS: 'TIMESTAMP_MILLIS',
        ParquetConvertedType_TIMESTAMP_MICROS: 'TIMESTAMP_MICROS',
        ParquetConvertedType_UINT_8: 'UINT_8',
        ParquetConvertedType_UINT_16: 'UINT_16',
        ParquetConvertedType_UINT_32: 'UINT_32',
        ParquetConvertedType_UINT_64: 'UINT_64',
        ParquetConvertedType_INT_8: 'INT_8',
        ParquetConvertedType_INT_16: 'INT_16',
        ParquetConvertedType_INT_32: 'INT_32',
        ParquetConvertedType_INT_64: 'INT_64',
        ParquetConvertedType_JSON: 'JSON',
        ParquetConvertedType_BSON: 'BSON',
        ParquetConvertedType_INTERVAL: 'INTERVAL',
    }.get(type_, 'UNKNOWN')


cdef encoding_name_from_enum(ParquetEncoding encoding_):
    return {
        ParquetEncoding_PLAIN: 'PLAIN',
        ParquetEncoding_PLAIN_DICTIONARY: 'PLAIN_DICTIONARY',
        ParquetEncoding_RLE: 'RLE',
        ParquetEncoding_BIT_PACKED: 'BIT_PACKED',
        ParquetEncoding_DELTA_BINARY_PACKED: 'DELTA_BINARY_PACKED',
        ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY: 'DELTA_LENGTH_BYTE_ARRAY',
        ParquetEncoding_DELTA_BYTE_ARRAY: 'DELTA_BYTE_ARRAY',
        ParquetEncoding_RLE_DICTIONARY: 'RLE_DICTIONARY',
        ParquetEncoding_BYTE_STREAM_SPLIT: 'BYTE_STREAM_SPLIT',
    }.get(encoding_, 'UNKNOWN')


cdef encoding_enum_from_name(str encoding_name):
    enc = {
        'PLAIN': ParquetEncoding_PLAIN,
        'BIT_PACKED': ParquetEncoding_BIT_PACKED,
        'RLE': ParquetEncoding_RLE,
        'BYTE_STREAM_SPLIT': ParquetEncoding_BYTE_STREAM_SPLIT,
        'DELTA_BINARY_PACKED': ParquetEncoding_DELTA_BINARY_PACKED,
        'DELTA_LENGTH_BYTE_ARRAY': ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY,
        'DELTA_BYTE_ARRAY': ParquetEncoding_DELTA_BYTE_ARRAY,
        'RLE_DICTIONARY': 'dict',
        'PLAIN_DICTIONARY': 'dict',
    }.get(encoding_name, None)
    if enc is None:
        raise ValueError(f"Unsupported column encoding: {encoding_name!r}")
    elif enc == 'dict':
        raise ValueError(f"{encoding_name!r} is already used by default.")
    else:
        return enc


cdef compression_name_from_enum(ParquetCompression compression_):
    return {
        ParquetCompression_UNCOMPRESSED: 'UNCOMPRESSED',
        ParquetCompression_SNAPPY: 'SNAPPY',
        ParquetCompression_GZIP: 'GZIP',
        ParquetCompression_LZO: 'LZO',
        ParquetCompression_BROTLI: 'BROTLI',
        ParquetCompression_LZ4: 'LZ4',
        ParquetCompression_ZSTD: 'ZSTD',
    }.get(compression_, 'UNKNOWN')


cdef int check_compression_name(name) except -1:
    if name.upper() not in {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4',
                            'ZSTD'}:
        raise ArrowException("Unsupported compression: " + name)
    return 0


cdef ParquetCompression compression_from_name(name):
    name = name.upper()
    if name == 'SNAPPY':
        return ParquetCompression_SNAPPY
    elif name == 'GZIP':
        return ParquetCompression_GZIP
    elif name == 'LZO':
        return ParquetCompression_LZO
    elif name == 'BROTLI':
        return ParquetCompression_BROTLI
    elif name == 'LZ4':
        return ParquetCompression_LZ4
    elif name == 'ZSTD':
        return ParquetCompression_ZSTD
    else:
        return ParquetCompression_UNCOMPRESSED


cdef class ParquetReader(_Weakrefable):
    cdef:
        object source
        CMemoryPool* pool
        UniquePtrNoGIL[FileReader] reader
        FileMetaData _metadata
        shared_ptr[CRandomAccessFile] rd_handle

    cdef public:
        _column_idx_map

    def __cinit__(self, MemoryPool memory_pool=None):
        self.pool = maybe_unbox_memory_pool(memory_pool)
        self._metadata = None

    def open(self, object source not None, *, bint use_memory_map=False,
             read_dictionary=None, FileMetaData metadata=None,
             int buffer_size=0, bint pre_buffer=False,
             coerce_int96_timestamp_unit=None,
             FileDecryptionProperties decryption_properties=None,
             thrift_string_size_limit=None,
             thrift_container_size_limit=None,
             page_checksum_verification=False):
        """
        Open a parquet file for reading.

        Parameters
        ----------
        source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
        use_memory_map : bool, default False
        read_dictionary : iterable[int or str], optional
        metadata : FileMetaData, optional
        buffer_size : int, default 0
        pre_buffer : bool, default False
        coerce_int96_timestamp_unit : str, optional
        decryption_properties : FileDecryptionProperties, optional
        thrift_string_size_limit : int, optional
        thrift_container_size_limit : int, optional
        page_checksum_verification : bool, default False
        """
        cdef:
            shared_ptr[CFileMetaData] c_metadata
            CReaderProperties properties = default_reader_properties()
            ArrowReaderProperties arrow_props = (
                default_arrow_reader_properties())
            FileReaderBuilder builder

        if metadata is not None:
            c_metadata = metadata.sp_metadata

        if buffer_size > 0:
            properties.enable_buffered_stream()
            properties.set_buffer_size(buffer_size)
        elif buffer_size == 0:
            properties.disable_buffered_stream()
        else:
            raise ValueError('Buffer size must be larger than zero')

        if thrift_string_size_limit is not None:
            if thrift_string_size_limit <= 0:
                raise ValueError("thrift_string_size_limit "
                                 "must be larger than zero")
            properties.set_thrift_string_size_limit(thrift_string_size_limit)
        if thrift_container_size_limit is not None:
            if thrift_container_size_limit <= 0:
                raise ValueError("thrift_container_size_limit "
                                 "must be larger than zero")
            properties.set_thrift_container_size_limit(
                thrift_container_size_limit)

        if decryption_properties is not None:
            properties.file_decryption_properties(
                decryption_properties.unwrap())

        arrow_props.set_pre_buffer(pre_buffer)

        properties.set_page_checksum_verification(page_checksum_verification)

        if coerce_int96_timestamp_unit is None:
            # use the default defined in default_arrow_reader_properties()
            pass
        else:
            arrow_props.set_coerce_int96_timestamp_unit(
                string_to_timeunit(coerce_int96_timestamp_unit))

        self.source = source
        get_reader(source, use_memory_map, &self.rd_handle)

        with nogil:
            check_status(builder.Open(self.rd_handle, properties, c_metadata))

        # Set up metadata
        with nogil:
            c_metadata = builder.raw_reader().metadata()
        self._metadata = result = FileMetaData()
        result.init(c_metadata)

        if read_dictionary is not None:
            self._set_read_dictionary(read_dictionary, &arrow_props)

        with nogil:
            check_status(builder.memory_pool(self.pool)
                         .properties(arrow_props)
                         .Build(&self.reader))

    cdef _set_read_dictionary(self, read_dictionary,
                              ArrowReaderProperties* props):
        for column in read_dictionary:
            if not isinstance(column, int):
                column = self.column_name_idx(column)
            props.set_read_dictionary(column, True)

    @property
    def column_paths(self):
        cdef:
            FileMetaData container = self.metadata
            const CFileMetaData* metadata = container._metadata
            vector[c_string] path
            int i = 0

        paths = []
        for i in range(0, metadata.num_columns()):
            path = (metadata.schema().Column(i)
                    .path().get().ToDotVector())
            paths.append([frombytes(x) for x in path])

        return paths

    @property
    def metadata(self):
        return self._metadata

    @property
    def schema_arrow(self):
        cdef shared_ptr[CSchema] out
        with nogil:
            check_status(self.reader.get().GetSchema(&out))
        return pyarrow_wrap_schema(out)

    @property
    def num_row_groups(self):
        return self.reader.get().num_row_groups()

    def set_use_threads(self, bint use_threads):
        """
        Parameters
        ----------
        use_threads : bool
        """
        self.reader.get().set_use_threads(use_threads)

    def set_batch_size(self, int64_t batch_size):
        """
        Parameters
        ----------
        batch_size : int64
        """
        self.reader.get().set_batch_size(batch_size)

    def iter_batches(self, int64_t batch_size, row_groups, column_indices=None,
                     bint use_threads=True):
        """
        Parameters
        ----------
        batch_size : int64
        row_groups : list[int]
        column_indices : list[int], optional
        use_threads : bool, default True

        Yields
        ------
        next : RecordBatch
        """
        cdef:
            vector[int] c_row_groups
            vector[int] c_column_indices
            shared_ptr[CRecordBatch] record_batch
            UniquePtrNoGIL[CRecordBatchReader] recordbatchreader

        self.set_batch_size(batch_size)

        if use_threads:
            self.set_use_threads(use_threads)

        for row_group in row_groups:
            c_row_groups.push_back(row_group)

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)
            with nogil:
                check_status(
                    self.reader.get().GetRecordBatchReader(
                        c_row_groups, c_column_indices, &recordbatchreader
                    )
                )
        else:
            with nogil:
                check_status(
                    self.reader.get().GetRecordBatchReader(
                        c_row_groups, &recordbatchreader
                    )
                )

        while True:
            with nogil:
                check_status(
                    recordbatchreader.get().ReadNext(&record_batch)
                )
            if record_batch.get() == NULL:
                break

            yield pyarrow_wrap_batch(record_batch)

    def read_row_group(self, int i, column_indices=None,
                       bint use_threads=True):
        """
        Parameters
        ----------
        i : int
        column_indices : list[int], optional
        use_threads : bool, default True

        Returns
        -------
        table : pyarrow.Table
        """
        return self.read_row_groups([i], column_indices, use_threads)

    def read_row_groups(self, row_groups not None, column_indices=None,
                        bint use_threads=True):
        """
        Parameters
        ----------
        row_groups : list[int]
        column_indices : list[int], optional
        use_threads : bool, default True

        Returns
        -------
        table : pyarrow.Table
        """
        cdef:
            shared_ptr[CTable] ctable
            vector[int] c_row_groups
            vector[int] c_column_indices

        self.set_use_threads(use_threads)

        for row_group in row_groups:
            c_row_groups.push_back(row_group)

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)

            with nogil:
                check_status(self.reader.get()
                             .ReadRowGroups(c_row_groups, c_column_indices,
                                            &ctable))
        else:
            # Read all columns
            with nogil:
                check_status(self.reader.get()
                             .ReadRowGroups(c_row_groups, &ctable))
        return pyarrow_wrap_table(ctable)

    def read_all(self, column_indices=None, bint use_threads=True):
        """
        Parameters
        ----------
        column_indices : list[int], optional
        use_threads : bool, default True

        Returns
        -------
        table : pyarrow.Table
        """
        cdef:
            shared_ptr[CTable] ctable
            vector[int] c_column_indices

        self.set_use_threads(use_threads)

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)

            with nogil:
                check_status(self.reader.get()
                             .ReadTable(c_column_indices, &ctable))
        else:
            # Read all columns
            with nogil:
                check_status(self.reader.get()
                             .ReadTable(&ctable))
        return pyarrow_wrap_table(ctable)

    def scan_contents(self, column_indices=None, batch_size=65536):
        """
        Parameters
        ----------
        column_indices : list[int], optional
        batch_size : int32, default 65536

        Returns
        -------
        num_rows : int64
        """
        cdef:
            vector[int] c_column_indices
            int32_t c_batch_size
            int64_t c_num_rows

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)

        c_batch_size = batch_size

        with nogil:
            check_status(self.reader.get()
                         .ScanContents(c_column_indices, c_batch_size,
                                       &c_num_rows))

        return c_num_rows

    def column_name_idx(self, column_name):
        """
        Find the index of a column by its name.

        Parameters
        ----------
        column_name : str
            Name of the column; separation of nesting levels is done via ".".

        Returns
        -------
        column_idx : int
            Integer index of the column in the schema.
        """
        cdef:
            FileMetaData container = self.metadata
            const CFileMetaData* metadata = container._metadata
            int i = 0

        if self._column_idx_map is None:
            self._column_idx_map = {}
            for i in range(0, metadata.num_columns()):
                col_bytes = tobytes(metadata.schema().Column(i)
                                    .path().get().ToDotString())
                self._column_idx_map[col_bytes] = i

        return self._column_idx_map[tobytes(column_name)]

    def read_column(self, int column_index):
        """
        Read the column at the specified index.

        Parameters
        ----------
        column_index : int
            Index of the column.

        Returns
        -------
        column : pyarrow.ChunkedArray
        """
        cdef shared_ptr[CChunkedArray] out
        with nogil:
            check_status(self.reader.get()
                         .ReadColumn(column_index, &out))
        return pyarrow_wrap_chunked_array(out)

    def close(self):
        if not self.closed:
            with nogil:
                check_status(self.rd_handle.get().Close())

    @property
    def closed(self):
        if self.rd_handle == NULL:
            return True
        with nogil:
            closed = self.rd_handle.get().closed()
        return closed


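# ParquetReader is the low-level binding; user code normally goes through
# pyarrow.parquet.ParquetFile, which wraps it. A sketch, again with a
# hypothetical file name:
#
#   import pyarrow.parquet as pq
#   pf = pq.ParquetFile("example.parquet", buffer_size=64 * 1024)
#   for batch in pf.iter_batches(batch_size=1024):
#       ...  # each batch is a pyarrow.RecordBatch
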
cdef CSortingColumn _convert_sorting_column(SortingColumn sorting_column):
    cdef CSortingColumn c_sorting_column

    c_sorting_column.column_idx = sorting_column.column_index
    c_sorting_column.descending = sorting_column.descending
    c_sorting_column.nulls_first = sorting_column.nulls_first

    return c_sorting_column


cdef vector[CSortingColumn] _convert_sorting_columns(sorting_columns) except *:
    if not (isinstance(sorting_columns, Sequence)
            and all(isinstance(col, SortingColumn) for col in sorting_columns)):
        raise ValueError(
            "'sorting_columns' must be a list of `SortingColumn`")

    cdef vector[CSortingColumn] c_sorting_columns = [_convert_sorting_column(col)
                                                     for col in sorting_columns]

    return c_sorting_columns


cdef shared_ptr[WriterProperties] _create_writer_properties(
        use_dictionary=None,
        compression=None,
        version=None,
        write_statistics=None,
        data_page_size=None,
        compression_level=None,
        use_byte_stream_split=False,
        column_encoding=None,
        data_page_version=None,
        FileEncryptionProperties encryption_properties=None,
        write_batch_size=None,
        dictionary_pagesize_limit=None,
        write_page_index=False,
        write_page_checksum=False,
        sorting_columns=None) except *:
    """General writer properties"""
    cdef:
        shared_ptr[WriterProperties] properties
        WriterProperties.Builder props

    # data_page_version

    if data_page_version is not None:
        if data_page_version == "1.0":
            props.data_page_version(ParquetDataPageVersion_V1)
        elif data_page_version == "2.0":
            props.data_page_version(ParquetDataPageVersion_V2)
        else:
            raise ValueError("Unsupported Parquet data page version: {0}"
                             .format(data_page_version))

    # version

    if version is not None:
        if version == "1.0":
            props.version(ParquetVersion_V1)
        elif version in ("2.0", "pseudo-2.0"):
            warnings.warn(
                "Parquet format '2.0' pseudo version is deprecated, use "
                "'2.4' or '2.6' for fine-grained feature selection",
                FutureWarning, stacklevel=2)
            props.version(ParquetVersion_V2_0)
        elif version == "2.4":
            props.version(ParquetVersion_V2_4)
        elif version == "2.6":
            props.version(ParquetVersion_V2_6)
        else:
            raise ValueError("Unsupported Parquet format version: {0}"
                             .format(version))

    # compression

    if isinstance(compression, basestring):
        check_compression_name(compression)
        props.compression(compression_from_name(compression))
    elif compression is not None:
        for column, codec in compression.iteritems():
            check_compression_name(codec)
            props.compression(tobytes(column), compression_from_name(codec))

    if isinstance(compression_level, int):
        props.compression_level(compression_level)
    elif compression_level is not None:
        for column, level in compression_level.iteritems():
            props.compression_level(tobytes(column), level)

    # use_dictionary

    if isinstance(use_dictionary, bool):
        if use_dictionary:
            props.enable_dictionary()
            if column_encoding is not None:
                raise ValueError(
                    "To use 'column_encoding' set 'use_dictionary' to False")
        else:
            props.disable_dictionary()
    elif use_dictionary is not None:
        # Deactivate dictionary encoding by default
        props.disable_dictionary()
        for column in use_dictionary:
            props.enable_dictionary(tobytes(column))
            if (column_encoding is not None and
                    column_encoding.get(column) is not None):
                raise ValueError(
                    "To use 'column_encoding' set 'use_dictionary' to False")

    # write_statistics

    if isinstance(write_statistics, bool):
        if write_statistics:
            props.enable_statistics()
        else:
            props.disable_statistics()
    elif write_statistics is not None:
        # Deactivate statistics by default and enable for specified columns
        props.disable_statistics()
        for column in write_statistics:
            props.enable_statistics(tobytes(column))

    # sorting_columns

    if sorting_columns is not None:
        props.set_sorting_columns(_convert_sorting_columns(sorting_columns))

    # use_byte_stream_split

    if isinstance(use_byte_stream_split, bool):
        if use_byte_stream_split:
            if column_encoding is not None:
                raise ValueError(
                    "'use_byte_stream_split' cannot be passed "
                    "together with 'column_encoding'")
            else:
                props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT)
    elif use_byte_stream_split is not None:
        for column in use_byte_stream_split:
            if column_encoding is None:
                column_encoding = {column: 'BYTE_STREAM_SPLIT'}
            elif column_encoding.get(column, None) is None:
                column_encoding[column] = 'BYTE_STREAM_SPLIT'
            else:
                raise ValueError(
                    "'use_byte_stream_split' cannot be passed "
                    "together with 'column_encoding'")

    # column_encoding
    # encoding map - encode individual columns

    if column_encoding is not None:
        if isinstance(column_encoding, dict):
            for column, _encoding in column_encoding.items():
                props.encoding(tobytes(column),
                               encoding_enum_from_name(_encoding))
        elif isinstance(column_encoding, str):
            props.encoding(encoding_enum_from_name(column_encoding))
        else:
            raise TypeError(
                "'column_encoding' should be a dictionary or a string")

    if data_page_size is not None:
        props.data_pagesize(data_page_size)

    if write_batch_size is not None:
        props.write_batch_size(write_batch_size)

    if dictionary_pagesize_limit is not None:
        props.dictionary_pagesize_limit(dictionary_pagesize_limit)

    # encryption

    if encryption_properties is not None:
        props.encryption(
            (<FileEncryptionProperties>encryption_properties).unwrap())

    # For backwards compatibility reasons we cap the maximum row group size
    # at 64Mi rows.  This could be changed in the future, though it would be
    # a breaking change.
    #
    # The user can always specify a smaller row group size (and the default
    # is smaller) when calling write_table.  If the call to write_table uses
    # a size larger than this then it will be latched to this value.
    props.max_row_group_length(_MAX_ROW_GROUP_SIZE)

    # checksum

    if write_page_checksum:
        props.enable_page_checksum()
    else:
        props.disable_page_checksum()

    # page index

    if write_page_index:
        props.enable_write_page_index()
    else:
        props.disable_write_page_index()

    properties = props.build()

    return properties


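# These writer properties surface as keyword arguments of
# pyarrow.parquet.write_table; a sketch, assuming a table ``tbl`` built
# elsewhere:
#
#   import pyarrow.parquet as pq
#   pq.write_table(tbl, "out.parquet",
#                  compression={"col_a": "ZSTD", "col_b": "SNAPPY"},
#                  use_dictionary=["col_b"],
#                  write_statistics=True,
#                  data_page_version="2.0")
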
cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
        use_deprecated_int96_timestamps=False,
        coerce_timestamps=None,
        allow_truncated_timestamps=False,
        writer_engine_version=None,
        use_compliant_nested_type=True,
        store_schema=True) except *:
    """Arrow writer properties"""
    cdef:
        shared_ptr[ArrowWriterProperties] arrow_properties
        ArrowWriterProperties.Builder arrow_props

    # Store the original Arrow schema so things like dictionary types can
    # be automatically reconstructed
    if store_schema:
        arrow_props.store_schema()

    # int96 support

    if use_deprecated_int96_timestamps:
        arrow_props.enable_deprecated_int96_timestamps()
    else:
        arrow_props.disable_deprecated_int96_timestamps()

    # coerce_timestamps

    if coerce_timestamps == 'ms':
        arrow_props.coerce_timestamps(TimeUnit_MILLI)
    elif coerce_timestamps == 'us':
        arrow_props.coerce_timestamps(TimeUnit_MICRO)
    elif coerce_timestamps is not None:
        raise ValueError('Invalid value for coerce_timestamps: {0}'
                         .format(coerce_timestamps))

    # allow_truncated_timestamps

    if allow_truncated_timestamps:
        arrow_props.allow_truncated_timestamps()
    else:
        arrow_props.disallow_truncated_timestamps()

    # use_compliant_nested_type

    if use_compliant_nested_type:
        arrow_props.enable_compliant_nested_types()
    else:
        arrow_props.disable_compliant_nested_types()

    # writer_engine_version

    if writer_engine_version == "V1":
        warnings.warn("V1 parquet writer engine is a no-op.  Use V2.")
        arrow_props.set_engine_version(ArrowWriterEngineVersion.V1)
    elif writer_engine_version != "V2":
        raise ValueError("Unsupported Writer Engine Version: {0}"
                         .format(writer_engine_version))

    arrow_properties = arrow_props.build()

    return arrow_properties

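# The Arrow-level properties surface through the same write_table keywords;
# a sketch with the hypothetical ``tbl`` from above:
#
#   pq.write_table(tbl, "out.parquet",
#                  coerce_timestamps="ms",
#                  allow_truncated_timestamps=True,
#                  use_deprecated_int96_timestamps=False)
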
cdef _name_to_index_map(Schema arrow_schema):
    cdef:
        shared_ptr[CSchema] sp_arrow_schema
        shared_ptr[SchemaDescriptor] sp_parquet_schema
        shared_ptr[WriterProperties] props = _create_writer_properties()
        shared_ptr[ArrowWriterProperties] arrow_props = _create_arrow_writer_properties(
            use_deprecated_int96_timestamps=False,
            coerce_timestamps=None,
            allow_truncated_timestamps=False,
            writer_engine_version="V2"
        )

    sp_arrow_schema = pyarrow_unwrap_schema(arrow_schema)

    with nogil:
        check_status(ToParquetSchema(
            sp_arrow_schema.get(), deref(props.get()), deref(arrow_props.get()), &sp_parquet_schema))

    out = dict()

    cdef SchemaDescriptor* parquet_schema = sp_parquet_schema.get()

    for i in range(parquet_schema.num_columns()):
        name = frombytes(parquet_schema.Column(i).path().get().ToDotString())
        out[name] = i

    return out


cdef class ParquetWriter(_Weakrefable):
    cdef:
        unique_ptr[FileWriter] writer
        shared_ptr[COutputStream] sink
        bint own_sink

    cdef readonly:
        object use_dictionary
        object use_deprecated_int96_timestamps
        object use_byte_stream_split
        object column_encoding
        object coerce_timestamps
        object allow_truncated_timestamps
        object compression
        object compression_level
        object data_page_version
        object use_compliant_nested_type
        object version
        object write_statistics
        object writer_engine_version
        int row_group_size
        int64_t data_page_size
        FileEncryptionProperties encryption_properties
        int64_t write_batch_size
        int64_t dictionary_pagesize_limit
        object store_schema

    def __cinit__(self, where, Schema schema not None, use_dictionary=None,
                  compression=None, version=None,
                  write_statistics=None,
                  MemoryPool memory_pool=None,
                  use_deprecated_int96_timestamps=False,
                  coerce_timestamps=None,
                  data_page_size=None,
                  allow_truncated_timestamps=False,
                  compression_level=None,
                  use_byte_stream_split=False,
                  column_encoding=None,
                  writer_engine_version=None,
                  data_page_version=None,
                  use_compliant_nested_type=True,
                  encryption_properties=None,
                  write_batch_size=None,
                  dictionary_pagesize_limit=None,
                  store_schema=True,
                  write_page_index=False,
                  write_page_checksum=False,
                  sorting_columns=None):
        cdef:
            shared_ptr[WriterProperties] properties
            shared_ptr[ArrowWriterProperties] arrow_properties
            c_string c_where
            CMemoryPool* pool

        try:
            where = _stringify_path(where)
        except TypeError:
            get_writer(where, &self.sink)
            self.own_sink = False
        else:
            c_where = tobytes(where)
            with nogil:
                self.sink = GetResultValue(FileOutputStream.Open(c_where))
            self.own_sink = True

        properties = _create_writer_properties(
            use_dictionary=use_dictionary,
            compression=compression,
            version=version,
            write_statistics=write_statistics,
            data_page_size=data_page_size,
            compression_level=compression_level,
            use_byte_stream_split=use_byte_stream_split,
            column_encoding=column_encoding,
            data_page_version=data_page_version,
            encryption_properties=encryption_properties,
            write_batch_size=write_batch_size,
            dictionary_pagesize_limit=dictionary_pagesize_limit,
            write_page_index=write_page_index,
            write_page_checksum=write_page_checksum,
            sorting_columns=sorting_columns,
        )
        arrow_properties = _create_arrow_writer_properties(
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            coerce_timestamps=coerce_timestamps,
            allow_truncated_timestamps=allow_truncated_timestamps,
            writer_engine_version=writer_engine_version,
            use_compliant_nested_type=use_compliant_nested_type,
            store_schema=store_schema,
        )

        pool = maybe_unbox_memory_pool(memory_pool)
        with nogil:
            self.writer = move(GetResultValue(
                FileWriter.Open(deref(schema.schema), pool,
                                self.sink, properties, arrow_properties)))

    def close(self):
        with nogil:
            check_status(self.writer.get().Close())
            if self.own_sink:
                check_status(self.sink.get().Close())

    def write_table(self, Table table, row_group_size=None):
        cdef:
            CTable* ctable = table.table
            int64_t c_row_group_size

        if row_group_size is None or row_group_size == -1:
            c_row_group_size = min(ctable.num_rows(), _DEFAULT_ROW_GROUP_SIZE)
        elif row_group_size == 0:
            raise ValueError('Row group size cannot be 0')
        else:
            c_row_group_size = row_group_size

        with nogil:
            check_status(self.writer.get()
                         .WriteTable(deref(ctable), c_row_group_size))

    @property
    def metadata(self):
        cdef:
            shared_ptr[CFileMetaData] metadata
            FileMetaData result
        with nogil:
            metadata = self.writer.get().metadata()
        if metadata:
            result = FileMetaData()
            result.init(metadata)
            return result
        raise RuntimeError(
            'file metadata is only available after writer close')
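
# ParquetWriter backs the public pyarrow.parquet.ParquetWriter; a sketch
# with hypothetical names:
#
#   import pyarrow.parquet as pq
#   with pq.ParquetWriter("out.parquet", tbl.schema) as writer:
#       writer.write_table(tbl, row_group_size=100_000)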
env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.pyx
ADDED
@@ -0,0 +1,458 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: language_level = 3

from cython cimport binding

from pyarrow.lib cimport (check_status, pyarrow_wrap_metadata,
                          pyarrow_unwrap_metadata)
from pyarrow.lib import frombytes, tobytes, KeyValueMetadata
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_fs cimport *
from pyarrow._fs cimport FileSystem


cpdef enum S3LogLevel:
    Off = <int8_t> CS3LogLevel_Off
    Fatal = <int8_t> CS3LogLevel_Fatal
    Error = <int8_t> CS3LogLevel_Error
    Warn = <int8_t> CS3LogLevel_Warn
    Info = <int8_t> CS3LogLevel_Info
    Debug = <int8_t> CS3LogLevel_Debug
    Trace = <int8_t> CS3LogLevel_Trace


def initialize_s3(S3LogLevel log_level=S3LogLevel.Fatal, int num_event_loop_threads=1):
    """
    Initialize S3 support

    Parameters
    ----------
    log_level : S3LogLevel
        level of logging
    num_event_loop_threads : int, default 1
        how many threads to use for the AWS SDK's I/O event loop

    Examples
    --------
    >>> fs.initialize_s3(fs.S3LogLevel.Error) # doctest: +SKIP
    """
    cdef CS3GlobalOptions options
    options.log_level = <CS3LogLevel> log_level
    options.num_event_loop_threads = num_event_loop_threads
    check_status(CInitializeS3(options))


def ensure_s3_initialized():
    """
    Initialize S3 (with default options) if not already initialized
    """
    check_status(CEnsureS3Initialized())


def finalize_s3():
    check_status(CFinalizeS3())


def ensure_s3_finalized():
    """
    Finalize S3 if already initialized
    """
    check_status(CEnsureS3Finalized())


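# A minimal init/teardown sketch; explicit initialization is optional, since
# ensure_s3_initialized() is also invoked lazily on first use, so this only
# shows the available knobs:
#
#   from pyarrow import fs
#   fs.initialize_s3(fs.S3LogLevel.Error, num_event_loop_threads=2)
#   ...
#   fs.finalize_s3()
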
def resolve_s3_region(bucket):
    """
    Resolve the S3 region of a bucket.

    Parameters
    ----------
    bucket : str
        An S3 bucket name

    Returns
    -------
    region : str
        An S3 region name

    Examples
    --------
    >>> fs.resolve_s3_region('voltrondata-labs-datasets')
    'us-east-2'
    """
    cdef:
        c_string c_bucket
        c_string c_region

    ensure_s3_initialized()

    c_bucket = tobytes(bucket)
    with nogil:
        c_region = GetResultValue(ResolveS3BucketRegion(c_bucket))

    return frombytes(c_region)


class S3RetryStrategy:
    """
    Base class for AWS retry strategies for use with S3.

    Parameters
    ----------
    max_attempts : int, default 3
        The maximum number of retry attempts to attempt before failing.
    """

    def __init__(self, max_attempts=3):
        self.max_attempts = max_attempts


class AwsStandardS3RetryStrategy(S3RetryStrategy):
    """
    Represents an AWS Standard retry strategy for use with S3.

    Parameters
    ----------
    max_attempts : int, default 3
        The maximum number of retry attempts to attempt before failing.
    """
    pass


class AwsDefaultS3RetryStrategy(S3RetryStrategy):
    """
    Represents an AWS Default retry strategy for use with S3.

    Parameters
    ----------
    max_attempts : int, default 3
        The maximum number of retry attempts to attempt before failing.
    """
    pass


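# The strategies above are passed to S3FileSystem via its retry_strategy
# parameter; a sketch:
#
#   from pyarrow import fs
#   s3 = fs.S3FileSystem(
#       region="us-east-2",
#       retry_strategy=fs.AwsStandardS3RetryStrategy(max_attempts=5))
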
cdef class S3FileSystem(FileSystem):
    """
    S3-backed FileSystem implementation

    AWS access_key and secret_key can be provided explicitly.

    If role_arn is provided instead of access_key and secret_key, temporary
    credentials will be fetched by issuing a request to STS to assume the
    specified role.

    If neither access_key nor secret_key are provided, and role_arn is also
    not provided, then S3FileSystem attempts to establish the credentials
    automatically, trying the following methods, in order:

    * ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` environment variables
    * configuration files such as ``~/.aws/credentials`` and ``~/.aws/config``
    * for nodes on Amazon EC2, the EC2 Instance Metadata Service

    Note: S3 buckets are special and the operations available on them may be
    limited or more expensive than desired.

    When S3FileSystem creates new buckets (assuming allow_bucket_creation is
    True), it does not pass any non-default settings. In AWS S3, the bucket and
    all objects will not be publicly visible, and will have no bucket policies
    and no resource tags. To have more control over how buckets are created,
    use a different API to create them.

    Parameters
    ----------
    access_key : str, default None
        AWS Access Key ID. Pass None to use the standard AWS environment
        variables and/or configuration file.
    secret_key : str, default None
        AWS Secret Access key. Pass None to use the standard AWS environment
        variables and/or configuration file.
    session_token : str, default None
        AWS Session Token.  An optional session token, required if access_key
        and secret_key are temporary credentials from STS.
    anonymous : boolean, default False
        Whether to connect anonymously if access_key and secret_key are None.
        If true, will not attempt to look up credentials using standard AWS
        configuration methods.
    role_arn : str, default None
        AWS Role ARN.  If provided instead of access_key and secret_key,
        temporary credentials will be fetched by assuming this role.
    session_name : str, default None
        An optional identifier for the assumed role session.
    external_id : str, default None
        An optional unique identifier that might be required when you assume
        a role in another account.
    load_frequency : int, default 900
        The frequency (in seconds) with which temporary credentials from an
        assumed role session will be refreshed.
    region : str, default None
        AWS region to connect to.  If not set, the AWS SDK will attempt to
        determine the region using heuristics such as environment variables,
        configuration profile, EC2 metadata, or default to 'us-east-1' when SDK
        version <1.8.  One can also use :func:`pyarrow.fs.resolve_s3_region` to
        automatically resolve the region from a bucket name.
    request_timeout : double, default None
        Socket read timeouts on Windows and macOS, in seconds.
        If omitted, the AWS SDK default value is used (typically 3 seconds).
        This option is ignored on non-Windows, non-macOS systems.
    connect_timeout : double, default None
        Socket connection timeout, in seconds.
        If omitted, the AWS SDK default value is used (typically 1 second).
    scheme : str, default 'https'
        S3 connection transport scheme.
    endpoint_override : str, default None
        Override region with a connect string such as "localhost:9000"
    background_writes : boolean, default True
        Whether file writes will be issued in the background, without
        blocking.
    default_metadata : mapping or pyarrow.KeyValueMetadata, default None
        Default metadata for open_output_stream. This will be ignored if
        non-empty metadata is passed to open_output_stream.
    proxy_options : dict or str, default None
        If a proxy is used, provide the options here. Supported options are:
        'scheme' (str: 'http' or 'https'; required), 'host' (str; required),
        'port' (int; required), 'username' (str; optional),
        'password' (str; optional).
        A proxy URI (str) can also be provided, in which case these options
        will be derived from the provided URI.
        The following are equivalent::

            S3FileSystem(proxy_options='http://username:password@localhost:8020')
            S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost',
                                        'port': 8020, 'username': 'username',
                                        'password': 'password'})
    allow_bucket_creation : bool, default False
        Whether to allow CreateDir at the bucket-level. This option may also be
        passed in a URI query parameter.
    allow_bucket_deletion : bool, default False
        Whether to allow DeleteDir at the bucket-level. This option may also be
        passed in a URI query parameter.
    retry_strategy : S3RetryStrategy, default AwsStandardS3RetryStrategy(max_attempts=3)
        The retry strategy to use with S3; fail after max_attempts. Available
        strategies are AwsStandardS3RetryStrategy, AwsDefaultS3RetryStrategy.

    Examples
    --------
    >>> from pyarrow import fs
    >>> s3 = fs.S3FileSystem(region='us-west-2')
    >>> s3.get_file_info(fs.FileSelector(
    ...    'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN', recursive=True
    ... ))
    [<FileInfo for 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN/.zarray...

    For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
    """

    cdef:
        CS3FileSystem* s3fs

    def __init__(self, *, access_key=None, secret_key=None, session_token=None,
                 bint anonymous=False, region=None, request_timeout=None,
                 connect_timeout=None, scheme=None, endpoint_override=None,
                 bint background_writes=True, default_metadata=None,
                 role_arn=None, session_name=None, external_id=None,
                 load_frequency=900, proxy_options=None,
                 allow_bucket_creation=False, allow_bucket_deletion=False,
                 retry_strategy: S3RetryStrategy = AwsStandardS3RetryStrategy(max_attempts=3)):
        cdef:
            optional[CS3Options] options
            shared_ptr[CS3FileSystem] wrapped

        # Need to do this before initializing `options` as the S3Options
        # constructor has a debug check against use after S3 finalization.
        ensure_s3_initialized()
|
279 |
+
|
280 |
+
if access_key is not None and secret_key is None:
|
281 |
+
raise ValueError(
|
282 |
+
'In order to initialize with explicit credentials both '
|
283 |
+
'access_key and secret_key must be provided, '
|
284 |
+
'`secret_key` is not set.'
|
285 |
+
)
|
286 |
+
elif access_key is None and secret_key is not None:
|
287 |
+
raise ValueError(
|
288 |
+
'In order to initialize with explicit credentials both '
|
289 |
+
'access_key and secret_key must be provided, '
|
290 |
+
'`access_key` is not set.'
|
291 |
+
)
|
292 |
+
|
293 |
+
elif session_token is not None and (access_key is None or
|
294 |
+
secret_key is None):
|
295 |
+
raise ValueError(
|
296 |
+
'In order to initialize a session with temporary credentials, '
|
297 |
+
'both secret_key and access_key must be provided in addition '
|
298 |
+
'to session_token.'
|
299 |
+
)
|
300 |
+
|
301 |
+
elif (access_key is not None or secret_key is not None):
|
302 |
+
if anonymous:
|
303 |
+
raise ValueError(
|
304 |
+
'Cannot pass anonymous=True together with access_key '
|
305 |
+
'and secret_key.')
|
306 |
+
|
307 |
+
if role_arn:
|
308 |
+
raise ValueError(
|
309 |
+
'Cannot provide role_arn with access_key and secret_key')
|
310 |
+
|
311 |
+
if session_token is None:
|
312 |
+
session_token = ""
|
313 |
+
|
314 |
+
options = CS3Options.FromAccessKey(
|
315 |
+
tobytes(access_key),
|
316 |
+
tobytes(secret_key),
|
317 |
+
tobytes(session_token)
|
318 |
+
)
|
319 |
+
elif anonymous:
|
320 |
+
if role_arn:
|
321 |
+
raise ValueError(
|
322 |
+
'Cannot provide role_arn with anonymous=True')
|
323 |
+
|
324 |
+
options = CS3Options.Anonymous()
|
325 |
+
elif role_arn:
|
326 |
+
if session_name is None:
|
327 |
+
session_name = ''
|
328 |
+
if external_id is None:
|
329 |
+
external_id = ''
|
330 |
+
|
331 |
+
options = CS3Options.FromAssumeRole(
|
332 |
+
tobytes(role_arn),
|
333 |
+
tobytes(session_name),
|
334 |
+
tobytes(external_id),
|
335 |
+
load_frequency
|
336 |
+
)
|
337 |
+
else:
|
338 |
+
options = CS3Options.Defaults()
|
339 |
+
|
340 |
+
if region is not None:
|
341 |
+
options.value().region = tobytes(region)
|
342 |
+
if request_timeout is not None:
|
343 |
+
options.value().request_timeout = request_timeout
|
344 |
+
if connect_timeout is not None:
|
345 |
+
options.value().connect_timeout = connect_timeout
|
346 |
+
if scheme is not None:
|
347 |
+
options.value().scheme = tobytes(scheme)
|
348 |
+
if endpoint_override is not None:
|
349 |
+
options.value().endpoint_override = tobytes(endpoint_override)
|
350 |
+
if background_writes is not None:
|
351 |
+
options.value().background_writes = background_writes
|
352 |
+
if default_metadata is not None:
|
353 |
+
if not isinstance(default_metadata, KeyValueMetadata):
|
354 |
+
default_metadata = KeyValueMetadata(default_metadata)
|
355 |
+
options.value().default_metadata = pyarrow_unwrap_metadata(
|
356 |
+
default_metadata)
|
357 |
+
|
358 |
+
if proxy_options is not None:
|
359 |
+
if isinstance(proxy_options, dict):
|
360 |
+
options.value().proxy_options.scheme = tobytes(
|
361 |
+
proxy_options["scheme"])
|
362 |
+
options.value().proxy_options.host = tobytes(
|
363 |
+
proxy_options["host"])
|
364 |
+
options.value().proxy_options.port = proxy_options["port"]
|
365 |
+
proxy_username = proxy_options.get("username", None)
|
366 |
+
if proxy_username:
|
367 |
+
options.value().proxy_options.username = tobytes(
|
368 |
+
proxy_username)
|
369 |
+
proxy_password = proxy_options.get("password", None)
|
370 |
+
if proxy_password:
|
371 |
+
options.value().proxy_options.password = tobytes(
|
372 |
+
proxy_password)
|
373 |
+
elif isinstance(proxy_options, str):
|
374 |
+
options.value().proxy_options = GetResultValue(
|
375 |
+
CS3ProxyOptions.FromUriString(tobytes(proxy_options)))
|
376 |
+
else:
|
377 |
+
raise TypeError(
|
378 |
+
"'proxy_options': expected 'dict' or 'str', "
|
379 |
+
f"got {type(proxy_options)} instead.")
|
380 |
+
|
381 |
+
options.value().allow_bucket_creation = allow_bucket_creation
|
382 |
+
options.value().allow_bucket_deletion = allow_bucket_deletion
|
383 |
+
|
384 |
+
if isinstance(retry_strategy, AwsStandardS3RetryStrategy):
|
385 |
+
options.value().retry_strategy = CS3RetryStrategy.GetAwsStandardRetryStrategy(
|
386 |
+
retry_strategy.max_attempts)
|
387 |
+
elif isinstance(retry_strategy, AwsDefaultS3RetryStrategy):
|
388 |
+
options.value().retry_strategy = CS3RetryStrategy.GetAwsDefaultRetryStrategy(
|
389 |
+
retry_strategy.max_attempts)
|
390 |
+
else:
|
391 |
+
raise ValueError(f'Invalid retry_strategy {retry_strategy!r}')
|
392 |
+
|
393 |
+
with nogil:
|
394 |
+
wrapped = GetResultValue(CS3FileSystem.Make(options.value()))
|
395 |
+
|
396 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
397 |
+
|
398 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
399 |
+
FileSystem.init(self, wrapped)
|
400 |
+
self.s3fs = <CS3FileSystem*> wrapped.get()
|
401 |
+
|
402 |
+
@staticmethod
|
403 |
+
@binding(True) # Required for cython < 3
|
404 |
+
def _reconstruct(kwargs):
|
405 |
+
# __reduce__ doesn't allow passing named arguments directly to the
|
406 |
+
# reconstructor, hence this wrapper.
|
407 |
+
return S3FileSystem(**kwargs)
|
408 |
+
|
409 |
+
def __reduce__(self):
|
410 |
+
cdef CS3Options opts = self.s3fs.options()
|
411 |
+
|
412 |
+
# if creds were explicitly provided, then use them
|
413 |
+
# else obtain them as they were last time.
|
414 |
+
if opts.credentials_kind == CS3CredentialsKind_Explicit:
|
415 |
+
access_key = frombytes(opts.GetAccessKey())
|
416 |
+
secret_key = frombytes(opts.GetSecretKey())
|
417 |
+
session_token = frombytes(opts.GetSessionToken())
|
418 |
+
else:
|
419 |
+
access_key = None
|
420 |
+
secret_key = None
|
421 |
+
session_token = None
|
422 |
+
|
423 |
+
return (
|
424 |
+
S3FileSystem._reconstruct, (dict(
|
425 |
+
access_key=access_key,
|
426 |
+
secret_key=secret_key,
|
427 |
+
session_token=session_token,
|
428 |
+
anonymous=(opts.credentials_kind ==
|
429 |
+
CS3CredentialsKind_Anonymous),
|
430 |
+
region=frombytes(opts.region),
|
431 |
+
scheme=frombytes(opts.scheme),
|
432 |
+
connect_timeout=opts.connect_timeout,
|
433 |
+
request_timeout=opts.request_timeout,
|
434 |
+
endpoint_override=frombytes(opts.endpoint_override),
|
435 |
+
role_arn=frombytes(opts.role_arn),
|
436 |
+
session_name=frombytes(opts.session_name),
|
437 |
+
external_id=frombytes(opts.external_id),
|
438 |
+
load_frequency=opts.load_frequency,
|
439 |
+
background_writes=opts.background_writes,
|
440 |
+
allow_bucket_creation=opts.allow_bucket_creation,
|
441 |
+
allow_bucket_deletion=opts.allow_bucket_deletion,
|
442 |
+
default_metadata=pyarrow_wrap_metadata(opts.default_metadata),
|
443 |
+
proxy_options={'scheme': frombytes(opts.proxy_options.scheme),
|
444 |
+
'host': frombytes(opts.proxy_options.host),
|
445 |
+
'port': opts.proxy_options.port,
|
446 |
+
'username': frombytes(
|
447 |
+
opts.proxy_options.username),
|
448 |
+
'password': frombytes(
|
449 |
+
opts.proxy_options.password)},
|
450 |
+
),)
|
451 |
+
)
|
452 |
+
|
453 |
+
@property
|
454 |
+
def region(self):
|
455 |
+
"""
|
456 |
+
The AWS region this filesystem connects to.
|
457 |
+
"""
|
458 |
+
return frombytes(self.s3fs.region())
|
env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (190 kB).

env-llmeval/lib/python3.10/site-packages/pyarrow/flight.py
ADDED
@@ -0,0 +1,69 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+try:
+    from pyarrow._flight import (  # noqa:F401
+        connect,
+        Action,
+        ActionType,
+        BasicAuth,
+        CallInfo,
+        CertKeyPair,
+        ClientAuthHandler,
+        ClientMiddleware,
+        ClientMiddlewareFactory,
+        DescriptorType,
+        FlightCallOptions,
+        FlightCancelledError,
+        FlightClient,
+        FlightDataStream,
+        FlightDescriptor,
+        FlightEndpoint,
+        FlightError,
+        FlightInfo,
+        FlightInternalError,
+        FlightMetadataReader,
+        FlightMetadataWriter,
+        FlightMethod,
+        FlightServerBase,
+        FlightServerError,
+        FlightStreamChunk,
+        FlightStreamReader,
+        FlightStreamWriter,
+        FlightTimedOutError,
+        FlightUnauthenticatedError,
+        FlightUnauthorizedError,
+        FlightUnavailableError,
+        FlightWriteSizeExceededError,
+        GeneratorStream,
+        Location,
+        MetadataRecordBatchReader,
+        MetadataRecordBatchWriter,
+        RecordBatchStream,
+        Result,
+        SchemaResult,
+        ServerAuthHandler,
+        ServerCallContext,
+        ServerMiddleware,
+        ServerMiddlewareFactory,
+        Ticket,
+        TracingServerMiddlewareFactory,
+    )
+except ImportError as exc:
+    raise ImportError(
+        f"The pyarrow installation is not built with support for 'flight' ({str(exc)})"
+    ) from None
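
Since the module only re-exports `pyarrow._flight` behind this import guard, user code reaches everything through `pyarrow.flight`. A minimal sketch, assuming pyarrow was built with Flight support and that a server is listening at a hypothetical address:

    import pyarrow.flight as flight

    # Raises the ImportError above if pyarrow was built without Flight.
    client = flight.connect("grpc://localhost:8815")  # hypothetical server

    # List the flights the (assumed) server advertises.
    for info in client.list_flights():
        print(info.descriptor)
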
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/api.h
ADDED
@@ -0,0 +1,47 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Coarse public API while the library is in development
+
+#pragma once
+
+#include "arrow/array.h"                    // IWYU pragma: export
+#include "arrow/array/array_run_end.h"      // IWYU pragma: export
+#include "arrow/array/concatenate.h"        // IWYU pragma: export
+#include "arrow/buffer.h"                   // IWYU pragma: export
+#include "arrow/builder.h"                  // IWYU pragma: export
+#include "arrow/chunked_array.h"            // IWYU pragma: export
+#include "arrow/compare.h"                  // IWYU pragma: export
+#include "arrow/config.h"                   // IWYU pragma: export
+#include "arrow/datum.h"                    // IWYU pragma: export
+#include "arrow/extension_type.h"           // IWYU pragma: export
+#include "arrow/memory_pool.h"              // IWYU pragma: export
+#include "arrow/pretty_print.h"             // IWYU pragma: export
+#include "arrow/record_batch.h"             // IWYU pragma: export
+#include "arrow/result.h"                   // IWYU pragma: export
+#include "arrow/status.h"                   // IWYU pragma: export
+#include "arrow/table.h"                    // IWYU pragma: export
+#include "arrow/table_builder.h"            // IWYU pragma: export
+#include "arrow/tensor.h"                   // IWYU pragma: export
+#include "arrow/type.h"                     // IWYU pragma: export
+#include "arrow/util/key_value_metadata.h"  // IWYU pragma: export
+#include "arrow/visit_array_inline.h"       // IWYU pragma: export
+#include "arrow/visit_scalar_inline.h"      // IWYU pragma: export
+#include "arrow/visitor.h"                  // IWYU pragma: export
+
+/// \brief Top-level namespace for Apache Arrow C++ API
+namespace arrow {}
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h
ADDED
@@ -0,0 +1,587 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <optional>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include "arrow/device.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/span.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+// ----------------------------------------------------------------------
+// Buffer classes
+
+/// \class Buffer
+/// \brief Object containing a pointer to a piece of contiguous memory with a
+/// particular size.
+///
+/// Buffers have two related notions of length: size and capacity. Size is
+/// the number of bytes that might have valid data. Capacity is the number
+/// of bytes that were allocated for the buffer in total.
+///
+/// The Buffer base class does not own its memory, but subclasses often do.
+///
+/// The following invariant is always true: Size <= Capacity
+class ARROW_EXPORT Buffer {
+ public:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Buffer);
+
+  /// \brief Construct from buffer and size without copying memory
+  ///
+  /// \param[in] data a memory buffer
+  /// \param[in] size buffer size
+  ///
+  /// \note The passed memory must be kept alive through some other means
+  Buffer(const uint8_t* data, int64_t size)
+      : is_mutable_(false),
+        is_cpu_(true),
+        data_(data),
+        size_(size),
+        capacity_(size),
+        device_type_(DeviceAllocationType::kCPU) {
+    SetMemoryManager(default_cpu_memory_manager());
+  }
+
+  Buffer(const uint8_t* data, int64_t size, std::shared_ptr<MemoryManager> mm,
+         std::shared_ptr<Buffer> parent = NULLPTR,
+         std::optional<DeviceAllocationType> device_type_override = std::nullopt)
+      : is_mutable_(false),
+        data_(data),
+        size_(size),
+        capacity_(size),
+        parent_(std::move(parent)) {
+    // SetMemoryManager will also set device_type_
+    SetMemoryManager(std::move(mm));
+    // If a device type is specified, use that instead. Example of when this can be
+    // useful: the CudaMemoryManager can set device_type_ to kCUDA, but you can specify
+    // device_type_override=kCUDA_HOST as the device type to override it.
+    if (device_type_override != std::nullopt) {
+      device_type_ = *device_type_override;
+    }
+  }
+
+  Buffer(uintptr_t address, int64_t size, std::shared_ptr<MemoryManager> mm,
+         std::shared_ptr<Buffer> parent = NULLPTR)
+      : Buffer(reinterpret_cast<const uint8_t*>(address), size, std::move(mm),
+               std::move(parent)) {}
+
+  /// \brief Construct from string_view without copying memory
+  ///
+  /// \param[in] data a string_view object
+  ///
+  /// \note The memory viewed by data must not be deallocated in the lifetime of the
+  /// Buffer; temporary rvalue strings must be stored in an lvalue somewhere
+  explicit Buffer(std::string_view data)
+      : Buffer(reinterpret_cast<const uint8_t*>(data.data()),
+               static_cast<int64_t>(data.size())) {}
+
+  virtual ~Buffer() = default;
+
+  /// An offset into data that is owned by another buffer, but we want to be
+  /// able to retain a valid pointer to it even after other shared_ptr's to the
+  /// parent buffer have been destroyed
+  ///
+  /// This method makes no assertions about alignment or padding of the buffer but
+  /// in general we expect buffers to be aligned and padded to 64 bytes. In the future
+  /// we might add utility methods to help determine if a buffer satisfies this contract.
+  Buffer(const std::shared_ptr<Buffer>& parent, const int64_t offset, const int64_t size)
+      : Buffer(parent->data_ + offset, size) {
+    parent_ = parent;
+    SetMemoryManager(parent->memory_manager_);
+  }
+
+  uint8_t operator[](std::size_t i) const { return data_[i]; }
+
+  /// \brief Construct a new std::string with a hexadecimal representation of the buffer.
+  /// \return std::string
+  std::string ToHexString();
+
+  /// Return true if both buffers are the same size and contain the same bytes
+  /// up to the number of compared bytes
+  bool Equals(const Buffer& other, int64_t nbytes) const;
+
+  /// Return true if both buffers are the same size and contain the same bytes
+  bool Equals(const Buffer& other) const;
+
+  /// Copy a section of the buffer into a new Buffer.
+  Result<std::shared_ptr<Buffer>> CopySlice(
+      const int64_t start, const int64_t nbytes,
+      MemoryPool* pool = default_memory_pool()) const;
+
+  /// Zero bytes in padding, i.e. bytes between size_ and capacity_.
+  void ZeroPadding() {
+#ifndef NDEBUG
+    CheckMutable();
+#endif
+    // A zero-capacity buffer can have a null data pointer
+    if (capacity_ != 0) {
+      memset(mutable_data() + size_, 0, static_cast<size_t>(capacity_ - size_));
+    }
+  }
+
+  /// \brief Construct an immutable buffer that takes ownership of the contents
+  /// of an std::string (without copying it).
+  ///
+  /// \param[in] data a string to own
+  /// \return a new Buffer instance
+  static std::shared_ptr<Buffer> FromString(std::string data);
+
+  /// \brief Construct an immutable buffer that takes ownership of the contents
+  /// of an std::vector (without copying it). Only vectors of TrivialType objects
+  /// (integers, floating point numbers, ...) can be wrapped by this function.
+  ///
+  /// \param[in] vec a vector to own
+  /// \return a new Buffer instance
+  template <typename T>
+  static std::shared_ptr<Buffer> FromVector(std::vector<T> vec) {
+    static_assert(std::is_trivial_v<T>,
+                  "Buffer::FromVector can only wrap vectors of trivial objects");
+
+    if (vec.empty()) {
+      return std::shared_ptr<Buffer>{new Buffer()};
+    }
+
+    auto* data = reinterpret_cast<uint8_t*>(vec.data());
+    auto size_in_bytes = static_cast<int64_t>(vec.size() * sizeof(T));
+    return std::shared_ptr<Buffer>{
+        new Buffer{data, size_in_bytes},
+        // Keep the vector's buffer alive inside the shared_ptr's destructor until after
+        // we have deleted the Buffer. Note we can't use this trick in FromString since
+        // std::string's data is inline for short strings so moving invalidates pointers
+        // into the string's buffer.
+        [vec = std::move(vec)](Buffer* buffer) { delete buffer; }};
+  }
+
+  /// \brief Create buffer referencing typed memory with some length without
+  /// copying
+  /// \param[in] data the typed memory as C array
+  /// \param[in] length the number of values in the array
+  /// \return a new shared_ptr<Buffer>
+  template <typename T, typename SizeType = int64_t>
+  static std::shared_ptr<Buffer> Wrap(const T* data, SizeType length) {
+    return std::make_shared<Buffer>(reinterpret_cast<const uint8_t*>(data),
+                                    static_cast<int64_t>(sizeof(T) * length));
+  }
+
+  /// \brief Create buffer referencing std::vector with some length without
+  /// copying
+  /// \param[in] data the vector to be referenced. If this vector is changed,
+  /// the buffer may become invalid
+  /// \return a new shared_ptr<Buffer>
+  template <typename T>
+  static std::shared_ptr<Buffer> Wrap(const std::vector<T>& data) {
+    return std::make_shared<Buffer>(reinterpret_cast<const uint8_t*>(data.data()),
+                                    static_cast<int64_t>(sizeof(T) * data.size()));
+  }
+
+  /// \brief Copy buffer contents into a new std::string
+  /// \return std::string
+  /// \note Can throw std::bad_alloc if buffer is large
+  std::string ToString() const;
+
+  /// \brief View buffer contents as a std::string_view
+  /// \return std::string_view
+  explicit operator std::string_view() const {
+    return {reinterpret_cast<const char*>(data_), static_cast<size_t>(size_)};
+  }
+
+  /// \brief Return a pointer to the buffer's data
+  ///
+  /// The buffer has to be a CPU buffer (`is_cpu()` is true).
+  /// Otherwise, an assertion may be thrown or a null pointer may be returned.
+  ///
+  /// To get the buffer's data address regardless of its device, call `address()`.
+  const uint8_t* data() const {
+#ifndef NDEBUG
+    CheckCPU();
+#endif
+    return ARROW_PREDICT_TRUE(is_cpu_) ? data_ : NULLPTR;
+  }
+
+  /// \brief Return a pointer to the buffer's data cast to a specific type
+  ///
+  /// The buffer has to be a CPU buffer (`is_cpu()` is true).
+  /// Otherwise, an assertion may be thrown or a null pointer may be returned.
+  template <typename T>
+  const T* data_as() const {
+    return reinterpret_cast<const T*>(data());
+  }
+
+  /// \brief Return the buffer's data as a span
+  template <typename T>
+  util::span<const T> span_as() const {
+    return util::span(data_as<T>(), static_cast<size_t>(size() / sizeof(T)));
+  }
+
+  /// \brief Return a writable pointer to the buffer's data
+  ///
+  /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()`
+  /// are true). Otherwise, an assertion may be thrown or a null pointer may
+  /// be returned.
+  ///
+  /// To get the buffer's mutable data address regardless of its device, call
+  /// `mutable_address()`.
+  uint8_t* mutable_data() {
+#ifndef NDEBUG
+    CheckCPU();
+    CheckMutable();
+#endif
+    return ARROW_PREDICT_TRUE(is_cpu_ && is_mutable_) ? const_cast<uint8_t*>(data_)
+                                                      : NULLPTR;
+  }
+
+  /// \brief Return a writable pointer to the buffer's data cast to a specific type
+  ///
+  /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()`
+  /// are true). Otherwise, an assertion may be thrown or a null pointer may
+  /// be returned.
+  template <typename T>
+  T* mutable_data_as() {
+    return reinterpret_cast<T*>(mutable_data());
+  }
+
+  /// \brief Return the buffer's mutable data as a span
+  template <typename T>
+  util::span<T> mutable_span_as() {
+    return util::span(mutable_data_as<T>(), static_cast<size_t>(size() / sizeof(T)));
+  }
+
+  /// \brief Return the device address of the buffer's data
+  uintptr_t address() const { return reinterpret_cast<uintptr_t>(data_); }
+
+  /// \brief Return a writable device address to the buffer's data
+  ///
+  /// The buffer has to be a mutable buffer (`is_mutable()` is true).
+  /// Otherwise, an assertion may be thrown or 0 may be returned.
+  uintptr_t mutable_address() const {
+#ifndef NDEBUG
+    CheckMutable();
+#endif
+    return ARROW_PREDICT_TRUE(is_mutable_) ? reinterpret_cast<uintptr_t>(data_) : 0;
+  }
+
+  /// \brief Return the buffer's size in bytes
+  int64_t size() const { return size_; }
+
+  /// \brief Return the buffer's capacity (number of allocated bytes)
+  int64_t capacity() const { return capacity_; }
+
+  /// \brief Whether the buffer is directly CPU-accessible
+  ///
+  /// If this function returns true, you can read directly from the buffer's
+  /// `data()` pointer. Otherwise, you'll have to `View()` or `Copy()` it.
+  bool is_cpu() const { return is_cpu_; }
+
+  /// \brief Whether the buffer is mutable
+  ///
+  /// If this function returns true, you are allowed to modify buffer contents
+  /// using the pointer returned by `mutable_data()` or `mutable_address()`.
+  bool is_mutable() const { return is_mutable_; }
+
+  const std::shared_ptr<Device>& device() const { return memory_manager_->device(); }
+
+  const std::shared_ptr<MemoryManager>& memory_manager() const { return memory_manager_; }
+
+  DeviceAllocationType device_type() const { return device_type_; }
+
+  std::shared_ptr<Buffer> parent() const { return parent_; }
+
+  /// \brief Get a RandomAccessFile for reading a buffer
+  ///
+  /// The returned file object reads from this buffer's underlying memory.
+  static Result<std::shared_ptr<io::RandomAccessFile>> GetReader(std::shared_ptr<Buffer>);
+
+  /// \brief Get an OutputStream for writing to a buffer
+  ///
+  /// The buffer must be mutable. The returned stream object writes into the buffer's
+  /// underlying memory (but it won't resize it).
+  static Result<std::shared_ptr<io::OutputStream>> GetWriter(std::shared_ptr<Buffer>);
+
+  /// \brief Copy buffer
+  ///
+  /// The buffer contents will be copied into a new buffer allocated by the
+  /// given MemoryManager. This function supports cross-device copies.
+  static Result<std::shared_ptr<Buffer>> Copy(std::shared_ptr<Buffer> source,
+                                              const std::shared_ptr<MemoryManager>& to);
+
+  /// \brief Copy a non-owned buffer
+  ///
+  /// This is useful for cases where the source memory area is externally managed
+  /// (its lifetime not tied to the source Buffer), otherwise please use Copy().
+  static Result<std::unique_ptr<Buffer>> CopyNonOwned(
+      const Buffer& source, const std::shared_ptr<MemoryManager>& to);
+
+  /// \brief View buffer
+  ///
+  /// Return a Buffer that reflects this buffer, seen potentially from another
+  /// device, without making an explicit copy of the contents. The underlying
+  /// mechanism is typically implemented by the kernel or device driver, and may
+  /// involve lazy caching of parts of the buffer contents on the destination
+  /// device's memory.
+  ///
+  /// If a non-copy view is unsupported for the buffer on the given device,
+  /// nullptr is returned. An error can be returned if some low-level
+  /// operation fails (such as an out-of-memory condition).
+  static Result<std::shared_ptr<Buffer>> View(std::shared_ptr<Buffer> source,
+                                              const std::shared_ptr<MemoryManager>& to);
+
+  /// \brief View or copy buffer
+  ///
+  /// Try to view buffer contents on the given MemoryManager's device, but
+  /// fall back to copying if a no-copy view isn't supported.
+  static Result<std::shared_ptr<Buffer>> ViewOrCopy(
+      std::shared_ptr<Buffer> source, const std::shared_ptr<MemoryManager>& to);
+
+  virtual std::shared_ptr<Device::SyncEvent> device_sync_event() { return NULLPTR; }
+
+ protected:
+  bool is_mutable_;
+  bool is_cpu_;
+  const uint8_t* data_;
+  int64_t size_;
+  int64_t capacity_;
+  DeviceAllocationType device_type_;
+
+  // null by default, but may be set
+  std::shared_ptr<Buffer> parent_;
+
+ private:
+  // private so that subclasses are forced to call SetMemoryManager()
+  std::shared_ptr<MemoryManager> memory_manager_;
+
+ protected:
+  Buffer();
+
+  void CheckMutable() const;
+  void CheckCPU() const;
+
+  void SetMemoryManager(std::shared_ptr<MemoryManager> mm) {
+    memory_manager_ = std::move(mm);
+    is_cpu_ = memory_manager_->is_cpu();
+    device_type_ = memory_manager_->device()->device_type();
+  }
+};
+
+/// \defgroup buffer-slicing-functions Functions for slicing buffers
+///
+/// @{
+
+/// \brief Construct a view on a buffer at the given offset and length.
+///
+/// This function cannot fail and does not check for errors (except in debug builds)
+static inline std::shared_ptr<Buffer> SliceBuffer(const std::shared_ptr<Buffer>& buffer,
+                                                  const int64_t offset,
+                                                  const int64_t length) {
+  return std::make_shared<Buffer>(buffer, offset, length);
+}
+
+/// \brief Construct a view on a buffer at the given offset, up to the buffer's end.
+///
+/// This function cannot fail and does not check for errors (except in debug builds)
+static inline std::shared_ptr<Buffer> SliceBuffer(const std::shared_ptr<Buffer>& buffer,
+                                                  const int64_t offset) {
+  int64_t length = buffer->size() - offset;
+  return SliceBuffer(buffer, offset, length);
+}
+
+/// \brief Input-checking version of SliceBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceBufferSafe(const std::shared_ptr<Buffer>& buffer,
+                                                int64_t offset);
+/// \brief Input-checking version of SliceBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceBufferSafe(const std::shared_ptr<Buffer>& buffer,
+                                                int64_t offset, int64_t length);
+
+/// \brief Like SliceBuffer, but construct a mutable buffer slice.
+///
+/// If the parent buffer is not mutable, behavior is undefined (it may abort
+/// in debug builds).
+ARROW_EXPORT
+std::shared_ptr<Buffer> SliceMutableBuffer(const std::shared_ptr<Buffer>& buffer,
+                                           const int64_t offset, const int64_t length);
+
+/// \brief Like SliceBuffer, but construct a mutable buffer slice.
+///
+/// If the parent buffer is not mutable, behavior is undefined (it may abort
+/// in debug builds).
+static inline std::shared_ptr<Buffer> SliceMutableBuffer(
+    const std::shared_ptr<Buffer>& buffer, const int64_t offset) {
+  int64_t length = buffer->size() - offset;
+  return SliceMutableBuffer(buffer, offset, length);
+}
+
+/// \brief Input-checking version of SliceMutableBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceMutableBufferSafe(
+    const std::shared_ptr<Buffer>& buffer, int64_t offset);
+/// \brief Input-checking version of SliceMutableBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceMutableBufferSafe(
+    const std::shared_ptr<Buffer>& buffer, int64_t offset, int64_t length);
+
+/// @}
+
+/// \class MutableBuffer
+/// \brief A Buffer whose contents can be mutated. May or may not own its data.
+class ARROW_EXPORT MutableBuffer : public Buffer {
+ public:
+  MutableBuffer(uint8_t* data, const int64_t size) : Buffer(data, size) {
+    is_mutable_ = true;
+  }
+
+  MutableBuffer(uint8_t* data, const int64_t size, std::shared_ptr<MemoryManager> mm)
+      : Buffer(data, size, std::move(mm)) {
+    is_mutable_ = true;
+  }
+
+  MutableBuffer(const std::shared_ptr<Buffer>& parent, const int64_t offset,
+                const int64_t size);
+
+  /// \brief Create buffer referencing typed memory with some length
+  /// \param[in] data the typed memory as C array
+  /// \param[in] length the number of values in the array
+  /// \return a new shared_ptr<Buffer>
+  template <typename T, typename SizeType = int64_t>
+  static std::shared_ptr<Buffer> Wrap(T* data, SizeType length) {
+    return std::make_shared<MutableBuffer>(reinterpret_cast<uint8_t*>(data),
+                                           static_cast<int64_t>(sizeof(T) * length));
+  }
+
+ protected:
+  MutableBuffer() : Buffer(NULLPTR, 0) {}
+};
+
+/// \class ResizableBuffer
+/// \brief A mutable buffer that can be resized
+class ARROW_EXPORT ResizableBuffer : public MutableBuffer {
+ public:
+  /// Change buffer reported size to indicated size, allocating memory if
+  /// necessary. This will ensure that the capacity of the buffer is a multiple
+  /// of 64 bytes as defined in Layout.md.
+  /// Consider using ZeroPadding afterwards, to conform to the Arrow layout
+  /// specification.
+  ///
+  /// @param new_size The new size for the buffer.
+  /// @param shrink_to_fit Whether to shrink the capacity if new size < current size
+  virtual Status Resize(const int64_t new_size, bool shrink_to_fit) = 0;
+  Status Resize(const int64_t new_size) {
+    return Resize(new_size, /*shrink_to_fit=*/true);
+  }
+
+  /// Ensure that buffer has enough memory allocated to fit the indicated
+  /// capacity (and meets the 64 byte padding requirement in Layout.md).
+  /// It does not change buffer's reported size and doesn't zero the padding.
+  virtual Status Reserve(const int64_t new_capacity) = 0;
+
+  template <class T>
+  Status TypedResize(const int64_t new_nb_elements, bool shrink_to_fit = true) {
+    return Resize(sizeof(T) * new_nb_elements, shrink_to_fit);
+  }
+
+  template <class T>
+  Status TypedReserve(const int64_t new_nb_elements) {
+    return Reserve(sizeof(T) * new_nb_elements);
+  }
+
+ protected:
+  ResizableBuffer(uint8_t* data, int64_t size) : MutableBuffer(data, size) {}
+  ResizableBuffer(uint8_t* data, int64_t size, std::shared_ptr<MemoryManager> mm)
+      : MutableBuffer(data, size, std::move(mm)) {}
+};
+
+/// \defgroup buffer-allocation-functions Functions for allocating buffers
+///
+/// @{
+
+/// \brief Allocate a fixed size mutable buffer from a memory pool, zero its padding.
+///
+/// \param[in] size size of buffer to allocate
+/// \param[in] pool a memory pool
+ARROW_EXPORT
+Result<std::unique_ptr<Buffer>> AllocateBuffer(const int64_t size,
+                                               MemoryPool* pool = NULLPTR);
+ARROW_EXPORT
+Result<std::unique_ptr<Buffer>> AllocateBuffer(const int64_t size, int64_t alignment,
+                                               MemoryPool* pool = NULLPTR);
+
+/// \brief Allocate a resizeable buffer from a memory pool, zero its padding.
+///
+/// \param[in] size size of buffer to allocate
+/// \param[in] pool a memory pool
+ARROW_EXPORT
+Result<std::unique_ptr<ResizableBuffer>> AllocateResizableBuffer(
+    const int64_t size, MemoryPool* pool = NULLPTR);
+ARROW_EXPORT
+Result<std::unique_ptr<ResizableBuffer>> AllocateResizableBuffer(
+    const int64_t size, const int64_t alignment, MemoryPool* pool = NULLPTR);
+
+/// \brief Allocate a bitmap buffer from a memory pool;
+/// no guarantee on values is provided.
+///
+/// \param[in] length size in bits of bitmap to allocate
+/// \param[in] pool memory pool to allocate memory from
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> AllocateBitmap(int64_t length,
+                                               MemoryPool* pool = NULLPTR);
+
+/// \brief Allocate a zero-initialized bitmap buffer from a memory pool
+///
+/// \param[in] length size in bits of bitmap to allocate
+/// \param[in] pool memory pool to allocate memory from
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> AllocateEmptyBitmap(int64_t length,
+                                                    MemoryPool* pool = NULLPTR);
+
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> AllocateEmptyBitmap(int64_t length, int64_t alignment,
+                                                    MemoryPool* pool = NULLPTR);
+
+/// \brief Concatenate multiple buffers into a single buffer
+///
+/// \param[in] buffers to be concatenated
+/// \param[in] pool memory pool to allocate the new buffer from
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> ConcatenateBuffers(const BufferVector& buffers,
+                                                   MemoryPool* pool = NULLPTR);
+
+/// @}
+
+}  // namespace arrow
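
pyarrow exposes these Buffer semantics through its Python API, which wraps the classes above; a minimal sketch of the size/slice/mutability contract (values are illustrative):

    import pyarrow as pa

    # allocate_buffer(..., resizable=True) yields a ResizableBuffer;
    # padding is zeroed as the allocation functions above describe.
    buf = pa.allocate_buffer(64, resizable=True)
    assert buf.is_mutable and buf.size == 64

    # Slices are zero-copy views that keep the parent alive (cf. SliceBuffer).
    view = pa.py_buffer(b"hello world").slice(6, 5)
    assert view.to_pybytes() == b"world"

    # resize() corresponds to ResizableBuffer::Resize.
    buf.resize(16)
    assert buf.size == 16
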
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h
ADDED
@@ -0,0 +1,104 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <atomic>
+#include <cstdint>
+#include <vector>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace internal {
+
+struct ChunkLocation {
+  int64_t chunk_index, index_in_chunk;
+};
+
+// An object that resolves an array chunk depending on a logical index
+struct ARROW_EXPORT ChunkResolver {
+  explicit ChunkResolver(const ArrayVector& chunks);
+
+  explicit ChunkResolver(const std::vector<const Array*>& chunks);
+
+  explicit ChunkResolver(const RecordBatchVector& batches);
+
+  ChunkResolver(ChunkResolver&& other) noexcept
+      : offsets_(std::move(other.offsets_)), cached_chunk_(other.cached_chunk_.load()) {}
+
+  ChunkResolver& operator=(ChunkResolver&& other) {
+    offsets_ = std::move(other.offsets_);
+    cached_chunk_.store(other.cached_chunk_.load());
+    return *this;
+  }
+
+  /// \brief Return a ChunkLocation containing the chunk index and in-chunk value index of
+  /// the chunked array at logical index
+  inline ChunkLocation Resolve(const int64_t index) const {
+    // It is common for the algorithms below to make consecutive accesses at
+    // a relatively small distance from each other, hence often falling in
+    // the same chunk.
+    // This is trivial when merging (assuming each side of the merge uses
+    // its own resolver), but also in the inner recursive invocations of
+    // partitioning.
+    if (offsets_.size() <= 1) {
+      return {0, index};
+    }
+    const auto cached_chunk = cached_chunk_.load();
+    const bool cache_hit =
+        (index >= offsets_[cached_chunk] && index < offsets_[cached_chunk + 1]);
+    if (ARROW_PREDICT_TRUE(cache_hit)) {
+      return {cached_chunk, index - offsets_[cached_chunk]};
+    }
+    auto chunk_index = Bisect(index);
+    cached_chunk_.store(chunk_index);
+    return {chunk_index, index - offsets_[chunk_index]};
+  }
+
+ protected:
+  // Find the chunk index corresponding to a value index using binary search
+  inline int64_t Bisect(const int64_t index) const {
+    // Like std::upper_bound(), but hand-written as it can help the compiler.
+    // Search [lo, lo + n)
+    int64_t lo = 0;
+    auto n = static_cast<int64_t>(offsets_.size());
+    while (n > 1) {
+      const int64_t m = n >> 1;
+      const int64_t mid = lo + m;
+      if (static_cast<int64_t>(index) >= offsets_[mid]) {
+        lo = mid;
+        n -= m;
+      } else {
+        n = m;
+      }
+    }
+    return lo;
+  }
+
+ private:
+  // Collection of starting offsets used for binary search
+  std::vector<int64_t> offsets_;
+
+  // Tracks the most recently used chunk index to allow fast
+  // access for consecutive indices corresponding to the same chunk
+  mutable std::atomic<int64_t> cached_chunk_;
+};
+
+}  // namespace internal
+}  // namespace arrow
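
The Resolve/Bisect pair above is an upper-bound binary search over cumulative chunk offsets, plus a one-entry cache for consecutive lookups. A back-of-the-envelope Python equivalent of the lookup logic, for illustration only (not part of pyarrow's public API):

    import bisect
    import itertools

    def make_offsets(chunk_lengths):
        # offsets[i] is the logical index where chunk i starts; one extra
        # entry marks the end, mirroring the C++ resolver's layout.
        return [0] + list(itertools.accumulate(chunk_lengths))

    def resolve(offsets, index):
        # Equivalent to Bisect(): find the last offset <= index.
        chunk = bisect.bisect_right(offsets, index) - 1
        return chunk, index - offsets[chunk]

    offsets = make_offsets([3, 2, 4])      # chunks of lengths 3, 2, 4
    assert resolve(offsets, 0) == (0, 0)
    assert resolve(offsets, 4) == (1, 1)   # second chunk, in-chunk index 1
    assert resolve(offsets, 8) == (2, 3)   # third chunk, in-chunk index 3
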
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h
ADDED
@@ -0,0 +1,275 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/chunk_resolver.h"
+#include "arrow/compare.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+class DataType;
+class MemoryPool;
+namespace stl {
+template <typename T, typename V>
+class ChunkedArrayIterator;
+}  // namespace stl
+
+/// \class ChunkedArray
+/// \brief A data structure managing a list of primitive Arrow arrays logically
+/// as one large array
+///
+/// Data chunking is treated throughout this project largely as an
+/// implementation detail for performance and memory use optimization.
+/// ChunkedArray allows Array objects to be collected and interpreted
+/// as a single logical array without requiring an expensive concatenation
+/// step.
+///
+/// In some cases, data produced by a function may exceed the capacity of an
+/// Array (like BinaryArray or StringArray) and so returning multiple Arrays is
+/// the only possibility. In these cases, we recommend returning a ChunkedArray
+/// instead of a vector of Arrays or some alternative.
+///
+/// When data is processed in parallel, it may not be practical or possible to
+/// create large contiguous memory allocations and write output into them. With
+/// some data types, like binary and string types, it is not possible at all to
+/// produce non-chunked array outputs without requiring a concatenation step at
+/// the end of processing.
+///
+/// Application developers may tune chunk sizes based on analysis of
+/// performance profiles but many developer-users will not need to be
+/// especially concerned with the chunking details.
+///
+/// Preserving the chunk layout/sizes in processing steps is generally not
+/// considered to be a contract in APIs. A function may decide to alter the
+/// chunking of its result. Similarly, APIs accepting multiple ChunkedArray
+/// inputs should not expect the chunk layout to be the same in each input.
+class ARROW_EXPORT ChunkedArray {
+ public:
+  ChunkedArray(ChunkedArray&&) = default;
+  ChunkedArray& operator=(ChunkedArray&&) = default;
+
+  /// \brief Construct a chunked array from a single Array
+  explicit ChunkedArray(std::shared_ptr<Array> chunk)
+      : ChunkedArray(ArrayVector{std::move(chunk)}) {}
+
+  /// \brief Construct a chunked array from a vector of arrays and an optional data type
+  ///
+  /// The vector elements must have the same data type.
+  /// If the data type is passed explicitly, the vector may be empty.
+  /// If the data type is omitted, the vector must be non-empty.
+  explicit ChunkedArray(ArrayVector chunks, std::shared_ptr<DataType> type = NULLPTR);
+
+  // \brief Constructor with basic input validation.
+  static Result<std::shared_ptr<ChunkedArray>> Make(
+      ArrayVector chunks, std::shared_ptr<DataType> type = NULLPTR);
+
+  /// \brief Create an empty ChunkedArray of a given type
+  ///
+  /// The output ChunkedArray will have one chunk with an empty
+  /// array of the given type.
+  ///
+  /// \param[in] type the data type of the empty ChunkedArray
+  /// \param[in] pool the memory pool to allocate memory from
+  /// \return the resulting ChunkedArray
+  static Result<std::shared_ptr<ChunkedArray>> MakeEmpty(
+      std::shared_ptr<DataType> type, MemoryPool* pool = default_memory_pool());
+
+  /// \return the total length of the chunked array; computed on construction
+  int64_t length() const { return length_; }
+
+  /// \return the total number of nulls among all chunks
+  int64_t null_count() const { return null_count_; }
+
+  /// \return the total number of chunks in the chunked array
+  int num_chunks() const { return static_cast<int>(chunks_.size()); }
+
+  /// \return chunk a particular chunk from the chunked array
+  const std::shared_ptr<Array>& chunk(int i) const { return chunks_[i]; }
+
+  /// \return an ArrayVector of chunks
+  const ArrayVector& chunks() const { return chunks_; }
+
+  /// \brief Construct a zero-copy slice of the chunked array with the
+  /// indicated offset and length
+  ///
+  /// \param[in] offset the position of the first element in the constructed
+  /// slice
+  /// \param[in] length the length of the slice. If there are not enough
+  /// elements in the chunked array, the length will be adjusted accordingly
+  ///
+  /// \return a new object wrapped in std::shared_ptr<ChunkedArray>
+  std::shared_ptr<ChunkedArray> Slice(int64_t offset, int64_t length) const;
+
+  /// \brief Slice from offset until end of the chunked array
+  std::shared_ptr<ChunkedArray> Slice(int64_t offset) const;
+
+  /// \brief Flatten this chunked array as a vector of chunked arrays, one
+  /// for each struct field
+  ///
+  /// \param[in] pool The pool for buffer allocations, if any
+  Result<std::vector<std::shared_ptr<ChunkedArray>>> Flatten(
+      MemoryPool* pool = default_memory_pool()) const;
+
+  /// Construct a zero-copy view of this chunked array with the given
+  /// type. Calls Array::View on each constituent chunk. Always succeeds if
+  /// there are zero chunks
+  Result<std::shared_ptr<ChunkedArray>> View(const std::shared_ptr<DataType>& type) const;
+
+  /// \brief Return the type of the chunked array
+  const std::shared_ptr<DataType>& type() const { return type_; }
+
+  /// \brief Return a Scalar containing the value of this array at index
+  Result<std::shared_ptr<Scalar>> GetScalar(int64_t index) const;
+
+  /// \brief Determine if two chunked arrays are equal.
+  ///
+  /// Two chunked arrays can be equal only if they have equal datatypes.
+  /// However, they may be equal even if they have different chunkings.
+  bool Equals(const ChunkedArray& other,
+              const EqualOptions& opts = EqualOptions::Defaults()) const;
+  /// \brief Determine if two chunked arrays are equal.
+  bool Equals(const std::shared_ptr<ChunkedArray>& other,
+              const EqualOptions& opts = EqualOptions::Defaults()) const;
+  /// \brief Determine if two chunked arrays are approximately equal
+  bool ApproxEquals(const ChunkedArray& other,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// \return PrettyPrint representation suitable for debugging
+  std::string ToString() const;
+
+  /// \brief Perform cheap validation checks to determine obvious inconsistencies
+  /// within the chunked array's internal data.
+  ///
+  /// This is O(k*m) where k is the number of array descendants,
/// and m is the number of chunks.
|
172 |
+
///
|
173 |
+
/// \return Status
|
174 |
+
Status Validate() const;
|
175 |
+
|
176 |
+
/// \brief Perform extensive validation checks to determine inconsistencies
|
177 |
+
/// within the chunk array's internal data.
|
178 |
+
///
|
179 |
+
/// This is O(k*n) where k is the number of array descendents,
|
180 |
+
/// and n is the length in elements.
|
181 |
+
///
|
182 |
+
/// \return Status
|
183 |
+
Status ValidateFull() const;
|
184 |
+
|
185 |
+
protected:
|
186 |
+
ArrayVector chunks_;
|
187 |
+
std::shared_ptr<DataType> type_;
|
188 |
+
int64_t length_;
|
189 |
+
int64_t null_count_;
|
190 |
+
|
191 |
+
private:
|
192 |
+
template <typename T, typename V>
|
193 |
+
friend class ::arrow::stl::ChunkedArrayIterator;
|
194 |
+
internal::ChunkResolver chunk_resolver_;
|
195 |
+
ARROW_DISALLOW_COPY_AND_ASSIGN(ChunkedArray);
|
196 |
+
};
|
197 |
+
|
198 |
+
namespace internal {
|
199 |
+
|
200 |
+
/// \brief EXPERIMENTAL: Utility for incremental iteration over contiguous
|
201 |
+
/// pieces of potentially differently-chunked ChunkedArray objects
|
202 |
+
class ARROW_EXPORT MultipleChunkIterator {
|
203 |
+
public:
|
204 |
+
MultipleChunkIterator(const ChunkedArray& left, const ChunkedArray& right)
|
205 |
+
: left_(left),
|
206 |
+
right_(right),
|
207 |
+
pos_(0),
|
208 |
+
length_(left.length()),
|
209 |
+
chunk_idx_left_(0),
|
210 |
+
chunk_idx_right_(0),
|
211 |
+
chunk_pos_left_(0),
|
212 |
+
chunk_pos_right_(0) {}
|
213 |
+
|
214 |
+
bool Next(std::shared_ptr<Array>* next_left, std::shared_ptr<Array>* next_right);
|
215 |
+
|
216 |
+
int64_t position() const { return pos_; }
|
217 |
+
|
218 |
+
private:
|
219 |
+
const ChunkedArray& left_;
|
220 |
+
const ChunkedArray& right_;
|
221 |
+
|
222 |
+
// The amount of the entire ChunkedArray consumed
|
223 |
+
int64_t pos_;
|
224 |
+
|
225 |
+
// Length of the chunked array(s)
|
226 |
+
int64_t length_;
|
227 |
+
|
228 |
+
// Current left chunk
|
229 |
+
int chunk_idx_left_;
|
230 |
+
|
231 |
+
// Current right chunk
|
232 |
+
int chunk_idx_right_;
|
233 |
+
|
234 |
+
// Offset into the current left chunk
|
235 |
+
int64_t chunk_pos_left_;
|
236 |
+
|
237 |
+
// Offset into the current right chunk
|
238 |
+
int64_t chunk_pos_right_;
|
239 |
+
};
|
240 |
+
|
241 |
+
/// \brief Evaluate binary function on two ChunkedArray objects having possibly
|
242 |
+
/// different chunk layouts. The passed binary function / functor should have
|
243 |
+
/// the following signature.
|
244 |
+
///
|
245 |
+
/// Status(const Array&, const Array&, int64_t)
|
246 |
+
///
|
247 |
+
/// The third argument is the absolute position relative to the start of each
|
248 |
+
/// ChunkedArray. The function is executed against each contiguous pair of
|
249 |
+
/// array segments, slicing if necessary.
|
250 |
+
///
|
251 |
+
/// For example, if two arrays have chunk sizes
|
252 |
+
///
|
253 |
+
/// left: [10, 10, 20]
|
254 |
+
/// right: [15, 10, 15]
|
255 |
+
///
|
256 |
+
/// Then the following invocations take place (pseudocode)
|
257 |
+
///
|
258 |
+
/// func(left.chunk[0][0:10], right.chunk[0][0:10], 0)
|
259 |
+
/// func(left.chunk[1][0:5], right.chunk[0][10:15], 10)
|
260 |
+
/// func(left.chunk[1][5:10], right.chunk[1][0:5], 15)
|
261 |
+
/// func(left.chunk[2][0:5], right.chunk[1][5:10], 20)
|
262 |
+
/// func(left.chunk[2][5:20], right.chunk[2][:], 25)
|
263 |
+
template <typename Action>
|
264 |
+
Status ApplyBinaryChunked(const ChunkedArray& left, const ChunkedArray& right,
|
265 |
+
Action&& action) {
|
266 |
+
MultipleChunkIterator iterator(left, right);
|
267 |
+
std::shared_ptr<Array> left_piece, right_piece;
|
268 |
+
while (iterator.Next(&left_piece, &right_piece)) {
|
269 |
+
ARROW_RETURN_NOT_OK(action(*left_piece, *right_piece, iterator.position()));
|
270 |
+
}
|
271 |
+
return Status::OK();
|
272 |
+
}
|
273 |
+
|
274 |
+
} // namespace internal
|
275 |
+
} // namespace arrow
|
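The ApplyBinaryChunked helper documented above is worth a concrete illustration: it re-chunks two differently-chunked inputs into aligned, equal-length segment pairs. A minimal sketch of calling it follows, using only the declarations in this header plus the documented Status(const Array&, const Array&, int64_t) functor signature; note that the helper lives in the internal namespace, and the CompareLengths wrapper is a hypothetical name for illustration.

// Sketch only: visit aligned slices of two differently-chunked arrays.
#include <iostream>
#include "arrow/chunked_array.h"
#include "arrow/status.h"

arrow::Status CompareLengths(const arrow::ChunkedArray& left,
                             const arrow::ChunkedArray& right) {
  // The functor receives each contiguous pair of equal-length segments,
  // plus the absolute starting position of the pair within both inputs.
  return arrow::internal::ApplyBinaryChunked(
      left, right,
      [](const arrow::Array& l, const arrow::Array& r, int64_t pos) {
        std::cout << "segment at " << pos << " has length " << l.length() << "\n";
        return arrow::Status::OK();
      });
}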
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h
ADDED
@@ -0,0 +1,145 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for comparing Arrow data structures
+
+#pragma once
+
+#include <cstdint>
+#include <iosfwd>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+class DataType;
+class Tensor;
+class SparseTensor;
+struct Scalar;
+
+static constexpr double kDefaultAbsoluteTolerance = 1E-5;
+
+/// A container of options for equality comparisons
+class EqualOptions {
+ public:
+  /// Whether or not NaNs are considered equal.
+  bool nans_equal() const { return nans_equal_; }
+
+  /// Return a new EqualOptions object with the "nans_equal" property changed.
+  EqualOptions nans_equal(bool v) const {
+    auto res = EqualOptions(*this);
+    res.nans_equal_ = v;
+    return res;
+  }
+
+  /// Whether or not zeros with differing signs are considered equal.
+  bool signed_zeros_equal() const { return signed_zeros_equal_; }
+
+  /// Return a new EqualOptions object with the "signed_zeros_equal" property changed.
+  EqualOptions signed_zeros_equal(bool v) const {
+    auto res = EqualOptions(*this);
+    res.signed_zeros_equal_ = v;
+    return res;
+  }
+
+  /// The absolute tolerance for approximate comparisons of floating-point values.
+  double atol() const { return atol_; }
+
+  /// Return a new EqualOptions object with the "atol" property changed.
+  EqualOptions atol(double v) const {
+    auto res = EqualOptions(*this);
+    res.atol_ = v;
+    return res;
+  }
+
+  /// The ostream to which a diff will be formatted if arrays disagree.
+  /// If this is null (the default) no diff will be formatted.
+  std::ostream* diff_sink() const { return diff_sink_; }
+
+  /// Return a new EqualOptions object with the "diff_sink" property changed.
+  /// This option will be ignored if diff formatting of the types of compared arrays is
+  /// not supported.
+  EqualOptions diff_sink(std::ostream* diff_sink) const {
+    auto res = EqualOptions(*this);
+    res.diff_sink_ = diff_sink;
+    return res;
+  }
+
+  static EqualOptions Defaults() { return {}; }
+
+ protected:
+  double atol_ = kDefaultAbsoluteTolerance;
+  bool nans_equal_ = false;
+  bool signed_zeros_equal_ = true;
+
+  std::ostream* diff_sink_ = NULLPTR;
+};
+
+/// Returns true if the arrays are exactly equal
+ARROW_EXPORT bool ArrayEquals(const Array& left, const Array& right,
+                              const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if the arrays are approximately equal. For non-floating point
+/// types, this is equivalent to ArrayEquals(left, right)
+ARROW_EXPORT bool ArrayApproxEquals(const Array& left, const Array& right,
+                                    const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if the indicated equal-length segments of the arrays are exactly equal
+ARROW_EXPORT bool ArrayRangeEquals(const Array& left, const Array& right,
+                                   int64_t start_idx, int64_t end_idx,
+                                   int64_t other_start_idx,
+                                   const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if the indicated equal-length segments of the arrays are approximately equal
+ARROW_EXPORT bool ArrayRangeApproxEquals(const Array& left, const Array& right,
+                                         int64_t start_idx, int64_t end_idx,
+                                         int64_t other_start_idx,
+                                         const EqualOptions& = EqualOptions::Defaults());
+
+ARROW_EXPORT bool TensorEquals(const Tensor& left, const Tensor& right,
+                               const EqualOptions& = EqualOptions::Defaults());
+
+/// EXPERIMENTAL: Returns true if the given sparse tensors are exactly equal
+ARROW_EXPORT bool SparseTensorEquals(const SparseTensor& left, const SparseTensor& right,
+                                     const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if the type metadata are exactly equal
+/// \param[in] left a DataType
+/// \param[in] right a DataType
+/// \param[in] check_metadata whether to compare KeyValueMetadata for child
+/// fields
+ARROW_EXPORT bool TypeEquals(const DataType& left, const DataType& right,
+                             bool check_metadata = true);
+
+/// Returns true if scalars are equal
+/// \param[in] left a Scalar
+/// \param[in] right a Scalar
+/// \param[in] options comparison options
+ARROW_EXPORT bool ScalarEquals(const Scalar& left, const Scalar& right,
+                               const EqualOptions& options = EqualOptions::Defaults());
+
+/// Returns true if scalars are approximately equal
+/// \param[in] left a Scalar
+/// \param[in] right a Scalar
+/// \param[in] options comparison options
+ARROW_EXPORT bool ScalarApproxEquals(
+    const Scalar& left, const Scalar& right,
+    const EqualOptions& options = EqualOptions::Defaults());
+
+}  // namespace arrow
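Since each EqualOptions setter returns a modified copy, options compose fluently. A minimal sketch under that reading of the header above (the RoughlyEqual wrapper is a hypothetical name for illustration):

// Sketch only: approximate float comparison with a custom tolerance.
#include "arrow/compare.h"

bool RoughlyEqual(const arrow::Array& a, const arrow::Array& b) {
  // Each setter returns a new EqualOptions value, so calls chain.
  auto opts = arrow::EqualOptions::Defaults().atol(1e-3).nans_equal(true);
  return arrow::ArrayApproxEquals(a, b, opts);
}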
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/config.h
ADDED
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <optional>
+#include <string>
+
+#include "arrow/status.h"
+#include "arrow/util/config.h"  // IWYU pragma: export
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+struct BuildInfo {
+  /// The packed version number, e.g. 1002003 (decimal) for Arrow 1.2.3
+  int version;
+  /// The "major" version number, e.g. 1 for Arrow 1.2.3
+  int version_major;
+  /// The "minor" version number, e.g. 2 for Arrow 1.2.3
+  int version_minor;
+  /// The "patch" version number, e.g. 3 for Arrow 1.2.3
+  int version_patch;
+  /// The version string, e.g. "1.2.3"
+  std::string version_string;
+  std::string so_version;
+  std::string full_so_version;
+
+  /// The CMake compiler identifier, e.g. "GNU"
+  std::string compiler_id;
+  std::string compiler_version;
+  std::string compiler_flags;
+
+  /// The git changeset id, if available
+  std::string git_id;
+  /// The git changeset description, if available
+  std::string git_description;
+  std::string package_kind;
+
+  /// The uppercase build type, e.g. "DEBUG" or "RELEASE"
+  std::string build_type;
+};
+
+struct RuntimeInfo {
+  /// The enabled SIMD level
+  ///
+  /// This can be less than `detected_simd_level` if the ARROW_USER_SIMD_LEVEL
+  /// environment variable is set to another value.
+  std::string simd_level;
+
+  /// The SIMD level available on the OS and CPU
+  std::string detected_simd_level;
+
+  /// Whether using the OS-based timezone database
+  /// This is set at compile-time.
+  bool using_os_timezone_db;
+
+  /// The path to the timezone database; by default None.
+  std::optional<std::string> timezone_db_path;
+};
+
+/// \brief Get runtime build info.
+///
+/// The returned values correspond to the exact loaded version of the Arrow library,
+/// rather than the values frozen at application compile-time through the `ARROW_*`
+/// preprocessor definitions.
+ARROW_EXPORT
+const BuildInfo& GetBuildInfo();
+
+/// \brief Get runtime info.
+///
+ARROW_EXPORT
+RuntimeInfo GetRuntimeInfo();
+
+struct GlobalOptions {
+  /// Path to text timezone database. This is only configurable on Windows,
+  /// which does not have a compatible OS timezone database.
+  std::optional<std::string> timezone_db_path;
+};
+
+ARROW_EXPORT
+Status Initialize(const GlobalOptions& options) noexcept;
+
+}  // namespace arrow
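The distinction the doc comment draws between loaded-library values and compile-time constants is the main point of this header. A minimal sketch of querying both structs (the PrintArrowInfo helper is a hypothetical name for illustration):

// Sketch only: report the loaded Arrow library's version and SIMD level.
#include <iostream>
#include "arrow/config.h"

void PrintArrowInfo() {
  const arrow::BuildInfo& build = arrow::GetBuildInfo();
  arrow::RuntimeInfo runtime = arrow::GetRuntimeInfo();
  // These reflect the shared library actually loaded at runtime,
  // not the headers the application was compiled against.
  std::cout << "Arrow " << build.version_string
            << " (SIMD: " << runtime.simd_level << ")\n";
}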
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h
ADDED
@@ -0,0 +1,22 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/csv/options.h"
+#include "arrow/csv/reader.h"
+#include "arrow/csv/writer.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h
ADDED
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/csv/options.h"
+#include "arrow/status.h"
+#include "arrow/util/delimiting.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace csv {
+
+ARROW_EXPORT
+std::unique_ptr<Chunker> MakeChunker(const ParseOptions& options);
+
+}  // namespace csv
+}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h
ADDED
@@ -0,0 +1,78 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace csv {
+
+class BlockParser;
+struct ConvertOptions;
+
+class ARROW_EXPORT ColumnBuilder {
+ public:
+  virtual ~ColumnBuilder() = default;
+
+  /// Spawn a task that will try to convert and append the given CSV block.
+  /// All calls to Append() should happen on the same thread; otherwise,
+  /// call Insert() instead.
+  virtual void Append(const std::shared_ptr<BlockParser>& parser) = 0;
+
+  /// Spawn a task that will try to convert and insert the given CSV block
+  virtual void Insert(int64_t block_index,
+                      const std::shared_ptr<BlockParser>& parser) = 0;
+
+  /// Return the final chunked array. The TaskGroup _must_ have finished!
+  virtual Result<std::shared_ptr<ChunkedArray>> Finish() = 0;
+
+  std::shared_ptr<arrow::internal::TaskGroup> task_group() { return task_group_; }
+
+  /// Construct a strictly-typed ColumnBuilder.
+  static Result<std::shared_ptr<ColumnBuilder>> Make(
+      MemoryPool* pool, const std::shared_ptr<DataType>& type, int32_t col_index,
+      const ConvertOptions& options,
+      const std::shared_ptr<arrow::internal::TaskGroup>& task_group);
+
+  /// Construct a type-inferring ColumnBuilder.
+  static Result<std::shared_ptr<ColumnBuilder>> Make(
+      MemoryPool* pool, int32_t col_index, const ConvertOptions& options,
+      const std::shared_ptr<arrow::internal::TaskGroup>& task_group);
+
+  /// Construct a ColumnBuilder for a column of nulls
+  /// (i.e. not present in the CSV file).
+  static Result<std::shared_ptr<ColumnBuilder>> MakeNull(
+      MemoryPool* pool, const std::shared_ptr<DataType>& type,
+      const std::shared_ptr<arrow::internal::TaskGroup>& task_group);
+
+ protected:
+  explicit ColumnBuilder(std::shared_ptr<arrow::internal::TaskGroup> task_group)
+      : task_group_(std::move(task_group)) {}
+
+  std::shared_ptr<arrow::internal::TaskGroup> task_group_;
+};
+
+}  // namespace csv
+}  // namespace arrow
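A ColumnBuilder converts parsed blocks on a TaskGroup and yields a ChunkedArray once that group has finished. A minimal sketch of the flow, assuming arrow/util/task_group.h provides TaskGroup::MakeSerial() (an assumption; the TaskGroup API is not shown in this diff) and that `parser` has already been filled by BlockParser::Parse(); the BuildColumn helper is a hypothetical name:

// Sketch only: drive a type-inferring ColumnBuilder over one parsed block.
#include "arrow/csv/column_builder.h"
#include "arrow/csv/options.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/util/task_group.h"

arrow::Result<std::shared_ptr<arrow::ChunkedArray>> BuildColumn(
    const std::shared_ptr<arrow::csv::BlockParser>& parser) {
  auto task_group = arrow::internal::TaskGroup::MakeSerial();  // assumed API
  auto options = arrow::csv::ConvertOptions::Defaults();
  // Column 0, type inferred from the data.
  ARROW_ASSIGN_OR_RAISE(
      auto builder, arrow::csv::ColumnBuilder::Make(arrow::default_memory_pool(),
                                                    /*col_index=*/0, options,
                                                    task_group));
  builder->Append(parser);                    // spawn the conversion task
  ARROW_RETURN_NOT_OK(task_group->Finish());  // the group must finish first
  return builder->Finish();                   // collect the chunked array
}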
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h
ADDED
@@ -0,0 +1,64 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace csv {
+
+class BlockParser;
+struct ConvertOptions;
+
+class ARROW_EXPORT ColumnDecoder {
+ public:
+  virtual ~ColumnDecoder() = default;
+
+  /// Spawn a task that will try to convert and insert the given CSV block
+  virtual Future<std::shared_ptr<Array>> Decode(
+      const std::shared_ptr<BlockParser>& parser) = 0;
+
+  /// Construct a strictly-typed ColumnDecoder.
+  static Result<std::shared_ptr<ColumnDecoder>> Make(MemoryPool* pool,
+                                                     std::shared_ptr<DataType> type,
+                                                     int32_t col_index,
+                                                     const ConvertOptions& options);
+
+  /// Construct a type-inferring ColumnDecoder.
+  /// Inference will run only on the first block; the type will be frozen afterwards.
+  static Result<std::shared_ptr<ColumnDecoder>> Make(MemoryPool* pool, int32_t col_index,
+                                                     const ConvertOptions& options);
+
+  /// Construct a ColumnDecoder for a column of nulls
+  /// (i.e. not present in the CSV file).
+  static Result<std::shared_ptr<ColumnDecoder>> MakeNull(MemoryPool* pool,
+                                                         std::shared_ptr<DataType> type);
+
+ protected:
+  ColumnDecoder() = default;
+};
+
+}  // namespace csv
+}  // namespace arrow
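Unlike ColumnBuilder, ColumnDecoder returns a Future per block rather than feeding a TaskGroup. A minimal sketch of decoding one block synchronously, assuming Future<T>::result() from arrow/util/future.h blocks and yields a Result (an assumption; that API is not shown here); DecodeBlock is a hypothetical name:

// Sketch only: decode one parsed block into an Array with a fixed type.
#include "arrow/csv/column_decoder.h"
#include "arrow/csv/options.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/type.h"
#include "arrow/util/future.h"

arrow::Result<std::shared_ptr<arrow::Array>> DecodeBlock(
    const std::shared_ptr<arrow::csv::BlockParser>& parser) {
  auto options = arrow::csv::ConvertOptions::Defaults();
  // Strictly-typed decoder for column 0: values are parsed as int64.
  ARROW_ASSIGN_OR_RAISE(
      auto decoder, arrow::csv::ColumnDecoder::Make(arrow::default_memory_pool(),
                                                    arrow::int64(),
                                                    /*col_index=*/0, options));
  // Decode() spawns the work and returns a Future; block on it here.
  return decoder->Decode(parser).result();  // assumed Future API
}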
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h
ADDED
@@ -0,0 +1,82 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/csv/options.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace csv {
+
+class BlockParser;
+
+class ARROW_EXPORT Converter {
+ public:
+  Converter(const std::shared_ptr<DataType>& type, const ConvertOptions& options,
+            MemoryPool* pool);
+  virtual ~Converter() = default;
+
+  virtual Result<std::shared_ptr<Array>> Convert(const BlockParser& parser,
+                                                 int32_t col_index) = 0;
+
+  std::shared_ptr<DataType> type() const { return type_; }
+
+  // Create a Converter for the given data type
+  static Result<std::shared_ptr<Converter>> Make(
+      const std::shared_ptr<DataType>& type, const ConvertOptions& options,
+      MemoryPool* pool = default_memory_pool());
+
+ protected:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Converter);
+
+  virtual Status Initialize() = 0;
+
+  // CAUTION: ConvertOptions can grow large (if it customizes hundreds or
+  // thousands of columns), so avoid copying it in each Converter.
+  const ConvertOptions& options_;
+  MemoryPool* pool_;
+  std::shared_ptr<DataType> type_;
+};
+
+class ARROW_EXPORT DictionaryConverter : public Converter {
+ public:
+  DictionaryConverter(const std::shared_ptr<DataType>& value_type,
+                      const ConvertOptions& options, MemoryPool* pool);
+
+  // If the dictionary length goes above this value, conversion will fail
+  // with Status::IndexError.
+  virtual void SetMaxCardinality(int32_t max_length) = 0;
+
+  // Create a Converter for the given dictionary value type.
+  // The dictionary index type will always be Int32.
+  static Result<std::shared_ptr<DictionaryConverter>> Make(
+      const std::shared_ptr<DataType>& value_type, const ConvertOptions& options,
+      MemoryPool* pool = default_memory_pool());
+
+ protected:
+  std::shared_ptr<DataType> value_type_;
+};
+
+}  // namespace csv
+}  // namespace arrow
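A Converter is created once per output type and then applied to parsed blocks column by column. A minimal sketch using only the declarations above (ConvertFirstColumn is a hypothetical name; the block is assumed to come from BlockParser, declared in parser.h):

// Sketch only: convert column 0 of a parsed block to a string array.
#include "arrow/csv/converter.h"
#include "arrow/csv/options.h"
#include "arrow/type.h"

arrow::Result<std::shared_ptr<arrow::Array>> ConvertFirstColumn(
    const arrow::csv::BlockParser& parser) {
  auto options = arrow::csv::ConvertOptions::Defaults();
  ARROW_ASSIGN_OR_RAISE(auto converter,
                        arrow::csv::Converter::Make(arrow::utf8(), options));
  // A single Converter can be reused across blocks and columns of its type.
  return converter->Convert(parser, /*col_index=*/0);
}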
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h
ADDED
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <string_view>
+
+namespace arrow {
+namespace csv {
+
+/// \brief Description of an invalid row
+struct InvalidRow {
+  /// \brief Number of columns expected in the row
+  int32_t expected_columns;
+  /// \brief Actual number of columns found in the row
+  int32_t actual_columns;
+  /// \brief The physical row number if known, or -1
+  ///
+  /// This number is one-based and also accounts for non-data rows (such as
+  /// CSV header rows).
+  int64_t number;
+  /// \brief View of the entire row. Memory will be freed after the callback returns
+  const std::string_view text;
+};
+
+/// \brief Result returned by an InvalidRowHandler
+enum class InvalidRowResult {
+  // Generate an error describing this row
+  Error,
+  // Skip over this row
+  Skip
+};
+
+/// \brief Callback for handling a row with an invalid number of columns while parsing
+/// \return result indicating whether an error should be returned from the parser or the
+/// row should be skipped
+using InvalidRowHandler = std::function<InvalidRowResult(const InvalidRow&)>;
+
+}  // namespace csv
+}  // namespace arrow
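An InvalidRowHandler is installed through ParseOptions::invalid_row_handler (declared in options.h, next in this diff). A minimal sketch of a handler that tolerates short rows but rejects long ones (MakeLenientOptions is a hypothetical name for illustration):

// Sketch only: skip rows with too few columns, error on everything else.
#include <iostream>
#include "arrow/csv/invalid_row.h"
#include "arrow/csv/options.h"

arrow::csv::ParseOptions MakeLenientOptions() {
  auto options = arrow::csv::ParseOptions::Defaults();
  options.invalid_row_handler = [](const arrow::csv::InvalidRow& row) {
    if (row.actual_columns < row.expected_columns) {
      std::cerr << "skipping row " << row.number << "\n";
      return arrow::csv::InvalidRowResult::Skip;  // drop the short row
    }
    return arrow::csv::InvalidRowResult::Error;   // too many columns
  };
  return options;
}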
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h
ADDED
@@ -0,0 +1,220 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "arrow/csv/invalid_row.h"
+#include "arrow/csv/type_fwd.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class DataType;
+class TimestampParser;
+
+namespace csv {
+
+// Silly workaround for https://github.com/michaeljones/breathe/issues/453
+constexpr char kDefaultEscapeChar = '\\';
+
+struct ARROW_EXPORT ParseOptions {
+  // Parsing options
+
+  /// Field delimiter
+  char delimiter = ',';
+  /// Whether quoting is used
+  bool quoting = true;
+  /// Quoting character (if `quoting` is true)
+  char quote_char = '"';
+  /// Whether a quote inside a value is double-quoted
+  bool double_quote = true;
+  /// Whether escaping is used
+  bool escaping = false;
+  /// Escaping character (if `escaping` is true)
+  char escape_char = kDefaultEscapeChar;
+  /// Whether values are allowed to contain CR (0x0d) and LF (0x0a) characters
+  bool newlines_in_values = false;
+  /// Whether empty lines are ignored. If false, an empty line represents
+  /// a single empty value (assuming a one-column CSV file).
+  bool ignore_empty_lines = true;
+  /// A handler function for rows which do not have the correct number of columns
+  InvalidRowHandler invalid_row_handler;
+
+  /// Create parsing options with default values
+  static ParseOptions Defaults();
+
+  /// \brief Test that all set options are valid
+  Status Validate() const;
+};
+
+struct ARROW_EXPORT ConvertOptions {
+  // Conversion options
+
+  /// Whether to check UTF8 validity of string columns
+  bool check_utf8 = true;
+  /// Optional per-column types (disabling type inference on those columns)
+  std::unordered_map<std::string, std::shared_ptr<DataType>> column_types;
+  /// Recognized spellings for null values
+  std::vector<std::string> null_values;
+  /// Recognized spellings for boolean true values
+  std::vector<std::string> true_values;
+  /// Recognized spellings for boolean false values
+  std::vector<std::string> false_values;
+
+  /// Whether string / binary columns can have null values.
+  ///
+  /// If true, then strings in "null_values" are considered null for string columns.
+  /// If false, then all strings are valid string values.
+  bool strings_can_be_null = false;
+
+  /// Whether quoted values can be null.
+  ///
+  /// If true, then strings in "null_values" are also considered null when they
+  /// appear quoted in the CSV file. Otherwise, quoted values are never considered null.
+  bool quoted_strings_can_be_null = true;
+
+  /// Whether to try to automatically dict-encode string / binary data.
+  /// If true, then when type inference detects a string or binary column,
+  /// it is dict-encoded up to `auto_dict_max_cardinality` distinct values
+  /// (per chunk), after which it switches to regular encoding.
+  ///
+  /// This setting is ignored for non-inferred columns (those in `column_types`).
+  bool auto_dict_encode = false;
+  int32_t auto_dict_max_cardinality = 50;
+
+  /// Decimal point character for floating-point and decimal data
+  char decimal_point = '.';
+
+  // XXX Should we have a separate FilterOptions?
+
+  /// If non-empty, indicates the names of columns from the CSV file that should
+  /// actually be read and converted (in the vector's order).
+  /// Columns not in this vector will be ignored.
+  std::vector<std::string> include_columns;
+  /// If false, columns in `include_columns` but not in the CSV file will error out.
+  /// If true, columns in `include_columns` but not in the CSV file will produce
+  /// a column of nulls (whose type is selected using `column_types`,
+  /// or null by default).
+  /// This option is ignored if `include_columns` is empty.
+  bool include_missing_columns = false;
+
+  /// User-defined timestamp parsers, using the virtual parser interface in
+  /// arrow/util/value_parsing.h. More than one parser can be specified, and
+  /// the CSV conversion logic will try parsing values starting from the
+  /// beginning of this vector. If no parsers are specified, we use the default
+  /// built-in ISO-8601 parser.
+  std::vector<std::shared_ptr<TimestampParser>> timestamp_parsers;
+
+  /// Create conversion options with default values, including conventional
+  /// values for `null_values`, `true_values` and `false_values`
+  static ConvertOptions Defaults();
+
+  /// \brief Test that all set options are valid
+  Status Validate() const;
+};
+
+struct ARROW_EXPORT ReadOptions {
+  // Reader options
+
+  /// Whether to use the global CPU thread pool
+  bool use_threads = true;
+
+  /// \brief Block size we request from the IO layer.
+  ///
+  /// This will determine multi-threading granularity as well as
+  /// the size of individual record batches.
+  /// The minimum valid value for block size is 1.
+  int32_t block_size = 1 << 20;  // 1 MB
+
+  /// Number of header rows to skip (not including the row of column names, if any)
+  int32_t skip_rows = 0;
+
+  /// Number of rows to skip after the column names are read, if any
+  int32_t skip_rows_after_names = 0;
+
+  /// Column names for the target table.
+  /// If empty, fall back on autogenerate_column_names.
+  std::vector<std::string> column_names;
+
+  /// Whether to autogenerate column names if `column_names` is empty.
+  /// If true, column names will be of the form "f0", "f1"...
+  /// If false, column names will be read from the first CSV row after `skip_rows`.
+  bool autogenerate_column_names = false;
+
+  /// Create read options with default values
+  static ReadOptions Defaults();
+
+  /// \brief Test that all set options are valid
+  Status Validate() const;
+};
+
+/// \brief Quoting style for CSV writing
+enum class ARROW_EXPORT QuotingStyle {
+  /// Only enclose values in quotes which need them, because their CSV rendering can
+  /// itself contain quotes (e.g. strings or binary values)
+  Needed,
+  /// Enclose all valid values in quotes. Nulls are not quoted. May cause readers to
+  /// interpret all values as strings if the schema is inferred.
+  AllValid,
+  /// Do not enclose any values in quotes. Prevents values from containing quotes ("),
+  /// cell delimiters (,) or line endings (\\r, \\n), following RFC4180. If values
+  /// contain these characters, an error is raised when attempting to write.
+  None
+};
+
+struct ARROW_EXPORT WriteOptions {
+  /// Whether to write an initial header line with column names
+  bool include_header = true;
+
+  /// \brief Maximum number of rows processed at a time
+  ///
+  /// The CSV writer converts and writes data in batches of N rows.
+  /// This number can impact performance.
+  int32_t batch_size = 1024;
+
+  /// Field delimiter
+  char delimiter = ',';
+
+  /// \brief The string to write for null values. Quotes are not allowed in this string.
+  std::string null_string;
+
+  /// \brief IO context for writing.
+  io::IOContext io_context;
+
+  /// \brief The end-of-line character to use for ending rows
+  std::string eol = "\n";
+
+  /// \brief Quoting style
+  QuotingStyle quoting_style = QuotingStyle::Needed;
+
+  /// Create write options with default values
+  static WriteOptions Defaults();
+
+  /// \brief Test that all set options are valid
+  Status Validate() const;
+};
+
+}  // namespace csv
+}  // namespace arrow
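The three reader-side option structs divide responsibilities: ReadOptions controls IO and chunking, ParseOptions controls delimiting, and ConvertOptions controls typing. A minimal sketch of configuring them together, using only fields declared above (ConfigureOptions and the column names "id"/"price" are hypothetical, for illustration):

// Sketch only: options for a semicolon-delimited CSV with two typed columns.
#include "arrow/csv/options.h"
#include "arrow/type.h"

void ConfigureOptions(arrow::csv::ReadOptions* read,
                      arrow::csv::ParseOptions* parse,
                      arrow::csv::ConvertOptions* convert) {
  *read = arrow::csv::ReadOptions::Defaults();
  read->block_size = 4 << 20;  // larger blocks: fewer, bigger record batches
  *parse = arrow::csv::ParseOptions::Defaults();
  parse->delimiter = ';';
  *convert = arrow::csv::ConvertOptions::Defaults();
  // Disable inference for these columns; the others are still inferred.
  convert->column_types["id"] = arrow::int64();
  convert->column_types["price"] = arrow::float64();
}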
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h
ADDED
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
#pragma once
|
19 |
+
|
20 |
+
#include <algorithm>
|
21 |
+
#include <cstddef>
|
22 |
+
#include <cstdint>
|
23 |
+
#include <memory>
|
24 |
+
#include <string_view>
|
25 |
+
#include <vector>
|
26 |
+
|
27 |
+
#include "arrow/buffer.h"
|
28 |
+
#include "arrow/csv/options.h"
|
29 |
+
#include "arrow/csv/type_fwd.h"
|
30 |
+
#include "arrow/status.h"
|
31 |
+
#include "arrow/util/macros.h"
|
32 |
+
#include "arrow/util/visibility.h"
|
33 |
+
|
34 |
+
namespace arrow {
|
35 |
+
|
36 |
+
class MemoryPool;
|
37 |
+
|
38 |
+
namespace csv {
|
39 |
+
|
40 |
+
/// Skip at most num_rows from the given input. The input pointer is updated
|
41 |
+
/// and the number of actually skipped rows is returns (may be less than
|
42 |
+
/// requested if the input is too short).
|
43 |
+
ARROW_EXPORT
|
44 |
+
int32_t SkipRows(const uint8_t* data, uint32_t size, int32_t num_rows,
|
45 |
+
const uint8_t** out_data);
|
46 |
+
|
47 |
+
class BlockParserImpl;
|
48 |
+
|
49 |
+
namespace detail {
|
50 |
+
|
51 |
+
struct ParsedValueDesc {
|
52 |
+
uint32_t offset : 31;
|
53 |
+
bool quoted : 1;
|
54 |
+
};
|
55 |
+
|
56 |
+
class ARROW_EXPORT DataBatch {
|
57 |
+
public:
|
58 |
+
explicit DataBatch(int32_t num_cols) : num_cols_(num_cols) {}
|
59 |
+
|
60 |
+
/// \brief Return the number of parsed rows (not skipped)
|
61 |
+
int32_t num_rows() const { return num_rows_; }
|
62 |
+
/// \brief Return the number of parsed columns
|
63 |
+
int32_t num_cols() const { return num_cols_; }
|
64 |
+
/// \brief Return the total size in bytes of parsed data
|
65 |
+
uint32_t num_bytes() const { return parsed_size_; }
|
66 |
+
/// \brief Return the number of skipped rows
|
67 |
+
int32_t num_skipped_rows() const { return static_cast<int32_t>(skipped_rows_.size()); }
|
68 |
+
|
69 |
+
template <typename Visitor>
|
70 |
+
Status VisitColumn(int32_t col_index, int64_t first_row, Visitor&& visit) const {
|
71 |
+
using detail::ParsedValueDesc;
|
72 |
+
|
73 |
+
int32_t batch_row = 0;
|
74 |
+
for (size_t buf_index = 0; buf_index < values_buffers_.size(); ++buf_index) {
|
75 |
+
const auto& values_buffer = values_buffers_[buf_index];
|
76 |
+
const auto values = reinterpret_cast<const ParsedValueDesc*>(values_buffer->data());
|
77 |
+
const auto max_pos =
|
78 |
+
static_cast<int32_t>(values_buffer->size() / sizeof(ParsedValueDesc)) - 1;
|
79 |
+
for (int32_t pos = col_index; pos < max_pos; pos += num_cols_, ++batch_row) {
|
80 |
+
auto start = values[pos].offset;
|
81 |
+
auto stop = values[pos + 1].offset;
|
82 |
+
auto quoted = values[pos + 1].quoted;
|
83 |
+
Status status = visit(parsed_ + start, stop - start, quoted);
|
84 |
+
if (ARROW_PREDICT_FALSE(!status.ok())) {
|
85 |
+
return DecorateWithRowNumber(std::move(status), first_row, batch_row);
|
86 |
+
}
|
87 |
+
}
|
88 |
+
}
|
89 |
+
return Status::OK();
|
90 |
+
}
|
91 |
+
|
92 |
+
template <typename Visitor>
|
93 |
+
Status VisitLastRow(Visitor&& visit) const {
|
94 |
+
using detail::ParsedValueDesc;
|
95 |
+
|
96 |
+
const auto& values_buffer = values_buffers_.back();
|
97 |
+
const auto values = reinterpret_cast<const ParsedValueDesc*>(values_buffer->data());
|
98 |
+
const auto start_pos =
|
99 |
+
static_cast<int32_t>(values_buffer->size() / sizeof(ParsedValueDesc)) -
|
100 |
+
num_cols_ - 1;
|
101 |
+
for (int32_t col_index = 0; col_index < num_cols_; ++col_index) {
|
102 |
+
auto start = values[start_pos + col_index].offset;
|
103 |
+
auto stop = values[start_pos + col_index + 1].offset;
|
104 |
+
auto quoted = values[start_pos + col_index + 1].quoted;
|
105 |
+
ARROW_RETURN_NOT_OK(visit(parsed_ + start, stop - start, quoted));
|
106 |
+
}
|
107 |
+
return Status::OK();
|
108 |
+
}
|
109 |
+
|
110 |
+
protected:
|
111 |
+
Status DecorateWithRowNumber(Status&& status, int64_t first_row,
|
112 |
+
int32_t batch_row) const {
|
113 |
+
if (first_row >= 0) {
|
114 |
+
// `skipped_rows_` is in ascending order by construction, so use bisection
|
115 |
+
// to find out how many rows were skipped before `batch_row`.
|
116 |
+
const auto skips_before =
|
117 |
+
std::upper_bound(skipped_rows_.begin(), skipped_rows_.end(), batch_row) -
|
118 |
+
skipped_rows_.begin();
|
119 |
+
status = status.WithMessage("Row #", batch_row + skips_before + first_row, ": ",
|
120 |
+
status.message());
|
121 |
+
}
|
122 |
+
// Use return_if so that when extra context is enabled it will be added
|
123 |
+
ARROW_RETURN_IF_(true, std::move(status), ARROW_STRINGIFY(status));
|
124 |
+
return std::move(status);
|
125 |
+
}
|
126 |
+
|
127 |
+
// The number of rows in this batch (not including any skipped ones)
|
128 |
+
int32_t num_rows_ = 0;
|
129 |
+
// The number of columns
|
130 |
+
int32_t num_cols_ = 0;
|
131 |
+
|
132 |
+
// XXX should we ensure the parsed buffer is padded with 8 or 16 excess zero bytes?
|
133 |
+
// It may help with null parsing...
|
134 |
+
std::vector<std::shared_ptr<Buffer>> values_buffers_;
|
135 |
+
std::shared_ptr<Buffer> parsed_buffer_;
|
136 |
+
const uint8_t* parsed_ = NULLPTR;
|
137 |
+
int32_t parsed_size_ = 0;
|
138 |
+
|
139 |
+
// Record the current num_rows_ each time a row is skipped
|
140 |
+
std::vector<int32_t> skipped_rows_;
|
141 |
+
|
142 |
+
friend class ::arrow::csv::BlockParserImpl;
|
143 |
+
};
|
144 |
+
|
145 |
+
} // namespace detail
|
146 |
+
|
147 |
+
constexpr int32_t kMaxParserNumRows = 100000;
|
148 |
+
|
149 |
+
/// \class BlockParser
|
150 |
+
/// \brief A reusable block-based parser for CSV data
|
151 |
+
///
|
152 |
+
/// The parser takes a block of CSV data and delimits rows and fields,
|
153 |
+
/// unquoting and unescaping them on the fly. Parsed data is owned by the
/// parser, so the original buffer can be discarded after Parse() returns.
///
/// If the block is truncated (i.e. not all data can be parsed), it is up
/// to the caller to arrange the next block to start with the trailing data.
/// Also, if the previous block ends with CR (0x0d) and a new block starts
/// with LF (0x0a), the parser will consider the leading newline as an empty
/// line; the caller should therefore strip it.
class ARROW_EXPORT BlockParser {
 public:
  explicit BlockParser(ParseOptions options, int32_t num_cols = -1,
                       int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows);
  explicit BlockParser(MemoryPool* pool, ParseOptions options, int32_t num_cols = -1,
                       int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows);
  ~BlockParser();

  /// \brief Parse a block of data
  ///
  /// Parse a block of CSV data, ingesting up to max_num_rows rows.
  /// The number of bytes actually parsed is returned in out_size.
  Status Parse(std::string_view data, uint32_t* out_size);

  /// \brief Parse sequential blocks of data
  ///
  /// Only the last block is allowed to be truncated.
  Status Parse(const std::vector<std::string_view>& data, uint32_t* out_size);

  /// \brief Parse the final block of data
  ///
  /// Like Parse(), but called with the final block in a file.
  /// The last row may lack a trailing line separator.
  Status ParseFinal(std::string_view data, uint32_t* out_size);

  /// \brief Parse the final sequential blocks of data
  ///
  /// Only the last block is allowed to be truncated.
  Status ParseFinal(const std::vector<std::string_view>& data, uint32_t* out_size);

  /// \brief Return the number of parsed rows
  int32_t num_rows() const { return parsed_batch().num_rows(); }
  /// \brief Return the number of parsed columns
  int32_t num_cols() const { return parsed_batch().num_cols(); }
  /// \brief Return the total size in bytes of parsed data
  uint32_t num_bytes() const { return parsed_batch().num_bytes(); }

  /// \brief Return the total number of rows including rows which were skipped
  int32_t total_num_rows() const {
    return parsed_batch().num_rows() + parsed_batch().num_skipped_rows();
  }

  /// \brief Return the row number of the first row in the block or -1 if unsupported
  int64_t first_row_num() const;

  /// \brief Visit parsed values in a column
  ///
  /// The signature of the visitor is
  /// Status(const uint8_t* data, uint32_t size, bool quoted)
  template <typename Visitor>
  Status VisitColumn(int32_t col_index, Visitor&& visit) const {
    return parsed_batch().VisitColumn(col_index, first_row_num(),
                                      std::forward<Visitor>(visit));
  }

  template <typename Visitor>
  Status VisitLastRow(Visitor&& visit) const {
    return parsed_batch().VisitLastRow(std::forward<Visitor>(visit));
  }

 protected:
  std::unique_ptr<BlockParserImpl> impl_;

  const detail::DataBatch& parsed_batch() const;
};

}  // namespace csv
}  // namespace arrow
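Illustrative usage (not part of the vendored header): a minimal sketch of visiting parsed CSV values with BlockParser, assuming `parser` has already ingested a block via Parse(). The visitor receives each cell's raw bytes plus a flag saying whether the cell was quoted; the function name DumpFirstColumn is hypothetical.

#include <cstdio>
#include "arrow/csv/parser.h"
#include "arrow/status.h"

// Print every value of column 0 from an already-parsed block.
arrow::Status DumpFirstColumn(const arrow::csv::BlockParser& parser) {
  return parser.VisitColumn(
      /*col_index=*/0,
      [](const uint8_t* data, uint32_t size, bool quoted) -> arrow::Status {
        std::printf("%.*s%s\n", static_cast<int>(size),
                    reinterpret_cast<const char*>(data),
                    quoted ? " (quoted)" : "");
        return arrow::Status::OK();
      });
}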
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h
ADDED
@@ -0,0 +1,112 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "arrow/csv/options.h"  // IWYU pragma: keep
#include "arrow/io/interfaces.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/type.h"
#include "arrow/type_fwd.h"
#include "arrow/util/future.h"
#include "arrow/util/thread_pool.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace io {
class InputStream;
}  // namespace io

namespace csv {

/// A class that reads an entire CSV file into an Arrow Table
class ARROW_EXPORT TableReader {
 public:
  virtual ~TableReader() = default;

  /// Read the entire CSV file and convert it to an Arrow Table
  virtual Result<std::shared_ptr<Table>> Read() = 0;
  /// Read the entire CSV file and convert it to an Arrow Table
  virtual Future<std::shared_ptr<Table>> ReadAsync() = 0;

  /// Create a TableReader instance
  static Result<std::shared_ptr<TableReader>> Make(io::IOContext io_context,
                                                   std::shared_ptr<io::InputStream> input,
                                                   const ReadOptions&,
                                                   const ParseOptions&,
                                                   const ConvertOptions&);
};

/// \brief A class that reads a CSV file incrementally
///
/// Caveats:
/// - For now, this is always single-threaded (regardless of `ReadOptions::use_threads`).
/// - Type inference is done on the first block and types are frozen afterwards;
///   to make sure the right data types are inferred, either set
///   `ReadOptions::block_size` to a large enough value, or use
///   `ConvertOptions::column_types` to set the desired data types explicitly.
class ARROW_EXPORT StreamingReader : public RecordBatchReader {
 public:
  virtual ~StreamingReader() = default;

  virtual Future<std::shared_ptr<RecordBatch>> ReadNextAsync() = 0;

  /// \brief Return the number of bytes which have been read and processed
  ///
  /// The returned number includes CSV bytes which the StreamingReader has
  /// finished processing, but not bytes for which some processing (e.g.
  /// CSV parsing or conversion to Arrow layout) is still ongoing.
  ///
  /// Furthermore, the following rules apply:
  /// - bytes skipped by `ReadOptions.skip_rows` are counted as being read before
  ///   any records are returned.
  /// - bytes read while parsing the header are counted as being read before any
  ///   records are returned.
  /// - bytes skipped by `ReadOptions.skip_rows_after_names` are counted after the
  ///   first batch is returned.
  virtual int64_t bytes_read() const = 0;

  /// Create a StreamingReader instance
  ///
  /// This involves some I/O as the first batch must be loaded during the creation
  /// process, so it is returned as a future.
  ///
  /// Currently, the StreamingReader is not async-reentrant and does not do any fan-out
  /// parsing (see ARROW-11889)
  static Future<std::shared_ptr<StreamingReader>> MakeAsync(
      io::IOContext io_context, std::shared_ptr<io::InputStream> input,
      arrow::internal::Executor* cpu_executor, const ReadOptions&, const ParseOptions&,
      const ConvertOptions&);

  static Result<std::shared_ptr<StreamingReader>> Make(
      io::IOContext io_context, std::shared_ptr<io::InputStream> input,
      const ReadOptions&, const ParseOptions&, const ConvertOptions&);
};

/// \brief Count the logical rows of data in a CSV file (i.e. the
/// number of rows you would get if you read the file into a table).
ARROW_EXPORT
Future<int64_t> CountRowsAsync(io::IOContext io_context,
                               std::shared_ptr<io::InputStream> input,
                               arrow::internal::Executor* cpu_executor,
                               const ReadOptions&, const ParseOptions&);

}  // namespace csv
}  // namespace arrow
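Illustrative usage (not part of the vendored header): a minimal sketch of reading a whole CSV file into a Table via TableReader::Make with default options. The file name "data.csv" and the helper name ReadCsv are assumptions; real code would tune the Read/Parse/Convert options.

#include "arrow/csv/api.h"
#include "arrow/io/file.h"
#include "arrow/result.h"
#include "arrow/table.h"

arrow::Result<std::shared_ptr<arrow::Table>> ReadCsv() {
  // Open the input file and build a reader with default options.
  ARROW_ASSIGN_OR_RAISE(auto input, arrow::io::ReadableFile::Open("data.csv"));
  ARROW_ASSIGN_OR_RAISE(
      auto reader,
      arrow::csv::TableReader::Make(arrow::io::default_io_context(), input,
                                    arrow::csv::ReadOptions::Defaults(),
                                    arrow::csv::ParseOptions::Defaults(),
                                    arrow::csv::ConvertOptions::Defaults()));
  // Read() performs the parse and conversion and returns the full Table.
  return reader->Read();
}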
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h
ADDED
@@ -0,0 +1,55 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include "arrow/csv/parser.h"
#include "arrow/testing/visibility.h"

namespace arrow {
namespace csv {

ARROW_TESTING_EXPORT
std::string MakeCSVData(std::vector<std::string> lines);

// Make a BlockParser from a vector of lines representing a CSV file
ARROW_TESTING_EXPORT
void MakeCSVParser(std::vector<std::string> lines, ParseOptions options, int32_t num_cols,
                   MemoryPool* pool, std::shared_ptr<BlockParser>* out);

ARROW_TESTING_EXPORT
void MakeCSVParser(std::vector<std::string> lines, ParseOptions options,
                   std::shared_ptr<BlockParser>* out);

ARROW_TESTING_EXPORT
void MakeCSVParser(std::vector<std::string> lines, std::shared_ptr<BlockParser>* out);

// Make a BlockParser from a vector of strings representing a single CSV column
ARROW_TESTING_EXPORT
void MakeColumnParser(std::vector<std::string> items, std::shared_ptr<BlockParser>* out);

ARROW_TESTING_EXPORT
Result<std::shared_ptr<Buffer>> MakeSampleCsvBuffer(
    size_t num_rows, std::function<bool(size_t row_num)> is_valid = {});

}  // namespace csv
}  // namespace arrow
ADDED
@@ -0,0 +1,28 @@
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

namespace arrow {
namespace csv {

class TableReader;
struct ConvertOptions;
struct ReadOptions;
struct ParseOptions;
struct WriteOptions;

}  // namespace csv
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h
ADDED
@@ -0,0 +1,89 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "arrow/csv/options.h"
#include "arrow/io/interfaces.h"
#include "arrow/ipc/type_fwd.h"
#include "arrow/record_batch.h"
#include "arrow/table.h"

namespace arrow {
namespace csv {

// Functionality for converting Arrow data to comma-separated value text.
// This library supports all primitive types that can be cast to a StringArray.
// It applies the following formatting rules:
// - For non-binary types no quotes surround values. Nulls are represented as the empty
//   string.
// - For binary types all non-null data is quoted (and quotes within data are escaped
//   with an additional quote).
//   Null values are empty and unquoted.

/// \defgroup csv-write-functions High-level functions for writing CSV files
/// @{

/// \brief Convert table to CSV and write the result to output.
/// Experimental
ARROW_EXPORT Status WriteCSV(const Table& table, const WriteOptions& options,
                             arrow::io::OutputStream* output);
/// \brief Convert batch to CSV and write the result to output.
/// Experimental
ARROW_EXPORT Status WriteCSV(const RecordBatch& batch, const WriteOptions& options,
                             arrow::io::OutputStream* output);
/// \brief Convert batches read through a RecordBatchReader
/// to CSV and write the results to output.
/// Experimental
ARROW_EXPORT Status WriteCSV(const std::shared_ptr<RecordBatchReader>& reader,
                             const WriteOptions& options,
                             arrow::io::OutputStream* output);

/// @}

/// \defgroup csv-writer-factories Functions for creating an incremental CSV writer
/// @{

/// \brief Create a new CSV writer. User is responsible for closing the
/// actual OutputStream.
///
/// \param[in] sink output stream to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<ipc::RecordBatchWriter>> MakeCSVWriter(
    std::shared_ptr<io::OutputStream> sink, const std::shared_ptr<Schema>& schema,
    const WriteOptions& options = WriteOptions::Defaults());

/// \brief Create a new CSV writer.
///
/// \param[in] sink output stream to write to (does not take ownership)
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<ipc::RecordBatchWriter>> MakeCSVWriter(
    io::OutputStream* sink, const std::shared_ptr<Schema>& schema,
    const WriteOptions& options = WriteOptions::Defaults());

/// @}

}  // namespace csv
}  // namespace arrow
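Illustrative usage (not part of the vendored header): a minimal sketch of serializing a Table to CSV with the high-level WriteCSV function. The output path "out.csv" and the helper name WriteTableAsCsv are assumptions; as the header notes, the caller owns and closes the stream.

#include "arrow/csv/writer.h"
#include "arrow/io/file.h"
#include "arrow/status.h"
#include "arrow/table.h"

arrow::Status WriteTableAsCsv(const arrow::Table& table) {
  // Open the destination stream; WriteCSV does not take ownership of it.
  ARROW_ASSIGN_OR_RAISE(auto sink, arrow::io::FileOutputStream::Open("out.csv"));
  ARROW_RETURN_NOT_OK(arrow::csv::WriteCSV(
      table, arrow::csv::WriteOptions::Defaults(), sink.get()));
  return sink->Close();
}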
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h
ADDED
@@ -0,0 +1,311 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>

#include "arrow/array/data.h"
#include "arrow/scalar.h"
#include "arrow/type.h"
#include "arrow/type_traits.h"
#include "arrow/util/checked_cast.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;
class ChunkedArray;
class RecordBatch;
class Table;

/// \class Datum
/// \brief Variant type for various Arrow C++ data structures
struct ARROW_EXPORT Datum {
  /// \brief The kind of datum stored
  enum Kind { NONE, SCALAR, ARRAY, CHUNKED_ARRAY, RECORD_BATCH, TABLE };

  /// \brief A placeholder type to represent empty datum
  struct Empty {};

  /// \brief Datum variants may have a length. This special value indicates that the
  /// current variant does not have a length.
  static constexpr int64_t kUnknownLength = -1;

  /// \brief Storage of the actual datum.
  ///
  /// Note: For arrays, ArrayData is stored instead of Array for easier processing
  std::variant<Empty, std::shared_ptr<Scalar>, std::shared_ptr<ArrayData>,
               std::shared_ptr<ChunkedArray>, std::shared_ptr<RecordBatch>,
               std::shared_ptr<Table>>
      value;

  /// \brief Empty datum, to be populated elsewhere
  Datum() = default;

  Datum(const Datum& other) = default;
  Datum& operator=(const Datum& other) = default;
  Datum(Datum&& other) = default;
  Datum& operator=(Datum&& other) = default;

  /// \brief Construct from a Scalar
  Datum(std::shared_ptr<Scalar> value)  // NOLINT implicit conversion
      : value(std::move(value)) {}

  /// \brief Construct from an ArrayData
  Datum(std::shared_ptr<ArrayData> value)  // NOLINT implicit conversion
      : value(std::move(value)) {}

  /// \brief Construct from an ArrayData
  Datum(ArrayData arg)  // NOLINT implicit conversion
      : value(std::make_shared<ArrayData>(std::move(arg))) {}

  /// \brief Construct from an Array
  Datum(const Array& value);  // NOLINT implicit conversion

  /// \brief Construct from an Array
  Datum(const std::shared_ptr<Array>& value);  // NOLINT implicit conversion

  /// \brief Construct from a ChunkedArray
  Datum(std::shared_ptr<ChunkedArray> value);  // NOLINT implicit conversion

  /// \brief Construct from a RecordBatch
  Datum(std::shared_ptr<RecordBatch> value);  // NOLINT implicit conversion

  /// \brief Construct from a Table
  Datum(std::shared_ptr<Table> value);  // NOLINT implicit conversion

  /// \brief Construct from a ChunkedArray.
  ///
  /// This can be expensive, prefer the shared_ptr<ChunkedArray> constructor
  explicit Datum(const ChunkedArray& value);

  /// \brief Construct from a RecordBatch.
  ///
  /// This can be expensive, prefer the shared_ptr<RecordBatch> constructor
  explicit Datum(const RecordBatch& value);

  /// \brief Construct from a Table.
  ///
  /// This can be expensive, prefer the shared_ptr<Table> constructor
  explicit Datum(const Table& value);

  /// \brief Cast from concrete subtypes of Array or Scalar to Datum
  template <typename T, bool IsArray = std::is_base_of_v<Array, T>,
            bool IsScalar = std::is_base_of_v<Scalar, T>,
            typename = enable_if_t<IsArray || IsScalar>>
  Datum(std::shared_ptr<T> value)  // NOLINT implicit conversion
      : Datum(std::shared_ptr<typename std::conditional<IsArray, Array, Scalar>::type>(
            std::move(value))) {}

  /// \brief Cast from concrete subtypes of Array or Scalar to Datum
  template <typename T, typename TV = typename std::remove_reference_t<T>,
            bool IsArray = std::is_base_of_v<Array, T>,
            bool IsScalar = std::is_base_of_v<Scalar, T>,
            typename = enable_if_t<IsArray || IsScalar>>
  Datum(T&& value)  // NOLINT implicit conversion
      : Datum(std::make_shared<TV>(std::forward<T>(value))) {}

  /// \brief Copy from concrete subtypes of Scalar.
  ///
  /// The concrete scalar type must be copyable (not all of them are).
  template <typename T, typename = enable_if_t<std::is_base_of_v<Scalar, T>>>
  Datum(const T& value)  // NOLINT implicit conversion
      : Datum(std::make_shared<T>(value)) {}

  // Convenience constructors
  /// \brief Convenience constructor storing a bool scalar.
  explicit Datum(bool value);
  /// \brief Convenience constructor storing an int8 scalar.
  explicit Datum(int8_t value);
  /// \brief Convenience constructor storing a uint8 scalar.
  explicit Datum(uint8_t value);
  /// \brief Convenience constructor storing an int16 scalar.
  explicit Datum(int16_t value);
  /// \brief Convenience constructor storing a uint16 scalar.
  explicit Datum(uint16_t value);
  /// \brief Convenience constructor storing an int32 scalar.
  explicit Datum(int32_t value);
  /// \brief Convenience constructor storing a uint32 scalar.
  explicit Datum(uint32_t value);
  /// \brief Convenience constructor storing an int64 scalar.
  explicit Datum(int64_t value);
  /// \brief Convenience constructor storing a uint64 scalar.
  explicit Datum(uint64_t value);
  /// \brief Convenience constructor storing a float scalar.
  explicit Datum(float value);
  /// \brief Convenience constructor storing a double scalar.
  explicit Datum(double value);
  /// \brief Convenience constructor storing a string scalar.
  explicit Datum(std::string value);
  /// \brief Convenience constructor storing a string scalar.
  explicit Datum(const char* value);

  /// \brief Convenience constructor for a DurationScalar from std::chrono::duration
  template <template <typename, typename> class StdDuration, typename Rep,
            typename Period,
            typename = decltype(DurationScalar{StdDuration<Rep, Period>{}})>
  explicit Datum(StdDuration<Rep, Period> d) : Datum{DurationScalar(d)} {}

  /// \brief The kind of data stored in Datum
  Datum::Kind kind() const {
    switch (this->value.index()) {
      case 0:
        return Datum::NONE;
      case 1:
        return Datum::SCALAR;
      case 2:
        return Datum::ARRAY;
      case 3:
        return Datum::CHUNKED_ARRAY;
      case 4:
        return Datum::RECORD_BATCH;
      case 5:
        return Datum::TABLE;
      default:
        return Datum::NONE;
    }
  }

  /// \brief Retrieve the stored array as ArrayData
  ///
  /// Use make_array() if an Array is desired (which is more expensive).
  /// \throws std::bad_variant_access if the datum is not an array
  const std::shared_ptr<ArrayData>& array() const {
    return std::get<std::shared_ptr<ArrayData>>(this->value);
  }

  /// \brief The sum of bytes in each buffer referenced by the datum
  /// Note: Scalars report a size of 0
  /// \see arrow::util::TotalBufferSize for caveats
  int64_t TotalBufferSize() const;

  /// \brief Get the stored ArrayData in mutable form
  ///
  /// For internal use primarily. Keep in mind a shared_ptr<Datum> may have multiple
  /// owners.
  ArrayData* mutable_array() const { return this->array().get(); }

  /// \brief Retrieve the stored array as Array
  /// \throws std::bad_variant_access if the datum is not an array
  std::shared_ptr<Array> make_array() const;

  /// \brief Retrieve the chunked array stored
  /// \throws std::bad_variant_access if the datum is not a chunked array
  const std::shared_ptr<ChunkedArray>& chunked_array() const {
    return std::get<std::shared_ptr<ChunkedArray>>(this->value);
  }

  /// \brief Retrieve the record batch stored
  /// \throws std::bad_variant_access if the datum is not a record batch
  const std::shared_ptr<RecordBatch>& record_batch() const {
    return std::get<std::shared_ptr<RecordBatch>>(this->value);
  }

  /// \brief Retrieve the table stored
  /// \throws std::bad_variant_access if the datum is not a table
  const std::shared_ptr<Table>& table() const {
    return std::get<std::shared_ptr<Table>>(this->value);
  }

  /// \brief Retrieve the scalar stored
  /// \throws std::bad_variant_access if the datum is not a scalar
  const std::shared_ptr<Scalar>& scalar() const {
    return std::get<std::shared_ptr<Scalar>>(this->value);
  }

  /// \brief Retrieve the datum as its concrete array type
  /// \throws std::bad_variant_access if the datum is not an array
  /// \tparam ExactType the expected array type, may cause undefined behavior if it is not
  /// the type of the stored array
  template <typename ExactType>
  std::shared_ptr<ExactType> array_as() const {
    return internal::checked_pointer_cast<ExactType>(this->make_array());
  }

  /// \brief Retrieve the datum as its concrete scalar type
  /// \throws std::bad_variant_access if the datum is not a scalar
  /// \tparam ExactType the expected scalar type, may cause undefined behavior if it is
  /// not the type of the stored scalar
  template <typename ExactType>
  const ExactType& scalar_as() const {
    return internal::checked_cast<const ExactType&>(*this->scalar());
  }

  /// \brief True if Datum contains an array
  bool is_array() const { return this->kind() == Datum::ARRAY; }

  /// \brief True if Datum contains a chunked array
  bool is_chunked_array() const { return this->kind() == Datum::CHUNKED_ARRAY; }

  /// \brief True if Datum contains an array or a chunked array
  bool is_arraylike() const {
    return this->kind() == Datum::ARRAY || this->kind() == Datum::CHUNKED_ARRAY;
  }

  /// \brief True if Datum contains a scalar
  bool is_scalar() const { return this->kind() == Datum::SCALAR; }

  /// \brief True if Datum contains a scalar or array-like data
  bool is_value() const { return this->is_arraylike() || this->is_scalar(); }

  /// \brief Return the null count.
  ///
  /// Only valid for scalar and array-like data.
  int64_t null_count() const;

  /// \brief The value type of the variant, if any
  ///
  /// \return nullptr if no type
  const std::shared_ptr<DataType>& type() const;

  /// \brief The schema of the variant, if any
  ///
  /// \return nullptr if no schema
  const std::shared_ptr<Schema>& schema() const;

  /// \brief The value length of the variant, if any
  ///
  /// \return kUnknownLength if no type
  int64_t length() const;

  /// \brief The array chunks of the variant, if any
  ///
  /// \return empty if not arraylike
  ArrayVector chunks() const;

  /// \brief True if the two data are equal
  bool Equals(const Datum& other) const;

  bool operator==(const Datum& other) const { return Equals(other); }
  bool operator!=(const Datum& other) const { return !Equals(other); }

  std::string ToString() const;
};

ARROW_EXPORT void PrintTo(const Datum&, std::ostream*);

ARROW_EXPORT std::string ToString(Datum::Kind kind);

}  // namespace arrow
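Illustrative usage (not part of the vendored header): a minimal sketch of Datum's variant behavior. The same type can hold either a scalar or an array, and kind() reports which one is stored; the function name DatumKinds is hypothetical.

#include <cassert>
#include <memory>
#include "arrow/datum.h"

void DatumKinds(const std::shared_ptr<arrow::Array>& some_array) {
  arrow::Datum scalar_datum(int64_t{42});  // wraps an Int64Scalar
  arrow::Datum array_datum(some_array);    // stored internally as ArrayData
  assert(scalar_datum.kind() == arrow::Datum::SCALAR);
  assert(array_datum.kind() == arrow::Datum::ARRAY);
  // length() reflects the array's length for array-like datums.
  assert(array_datum.length() == some_array->length());
}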
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h
ADDED
@@ -0,0 +1,119 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#include "arrow/extension_type.h"

namespace arrow {
namespace extension {

class ARROW_EXPORT FixedShapeTensorArray : public ExtensionArray {
 public:
  using ExtensionArray::ExtensionArray;

  /// \brief Create a FixedShapeTensorArray from a Tensor
  ///
  /// This method will create a FixedShapeTensorArray from a Tensor, taking its first
  /// dimension as the number of elements in the resulting array and the remaining
  /// dimensions as the shape of the individual tensors. If Tensor provides strides,
  /// they will be used to determine dimension permutation. Otherwise, row-major layout
  /// (i.e. no permutation) will be assumed.
  ///
  /// \param[in] tensor The Tensor to convert to a FixedShapeTensorArray
  static Result<std::shared_ptr<FixedShapeTensorArray>> FromTensor(
      const std::shared_ptr<Tensor>& tensor);

  /// \brief Create a Tensor from FixedShapeTensorArray
  ///
  /// This method will create a Tensor from a FixedShapeTensorArray, setting its first
  /// dimension as length equal to the FixedShapeTensorArray's length and the remaining
  /// dimensions as the FixedShapeTensorType's shape. Shape and dim_names will be
  /// permuted according to permutation stored in the FixedShapeTensorType metadata.
  const Result<std::shared_ptr<Tensor>> ToTensor() const;
};

/// \brief Concrete type class for constant-size Tensor data.
/// This is a canonical Arrow extension type.
/// See: https://arrow.apache.org/docs/format/CanonicalExtensions.html
class ARROW_EXPORT FixedShapeTensorType : public ExtensionType {
 public:
  FixedShapeTensorType(const std::shared_ptr<DataType>& value_type, const int32_t& size,
                       const std::vector<int64_t>& shape,
                       const std::vector<int64_t>& permutation = {},
                       const std::vector<std::string>& dim_names = {})
      : ExtensionType(fixed_size_list(value_type, size)),
        value_type_(value_type),
        shape_(shape),
        permutation_(permutation),
        dim_names_(dim_names) {}

  std::string extension_name() const override { return "arrow.fixed_shape_tensor"; }
  std::string ToString() const override;

  /// Number of dimensions of tensor elements
  size_t ndim() { return shape_.size(); }

  /// Shape of tensor elements
  const std::vector<int64_t> shape() const { return shape_; }

  /// Value type of tensor elements
  const std::shared_ptr<DataType> value_type() const { return value_type_; }

  /// Strides of tensor elements. Strides give the offset in bytes between adjacent
  /// elements along each dimension. If permutation is non-empty, strides are
  /// computed from the permuted tensor element's shape.
  const std::vector<int64_t>& strides();

  /// Permutation mapping from logical to physical memory layout of tensor elements
  const std::vector<int64_t>& permutation() const { return permutation_; }

  /// Dimension names of tensor elements. Dimensions are ordered physically.
  const std::vector<std::string>& dim_names() const { return dim_names_; }

  bool ExtensionEquals(const ExtensionType& other) const override;

  std::string Serialize() const override;

  Result<std::shared_ptr<DataType>> Deserialize(
      std::shared_ptr<DataType> storage_type,
      const std::string& serialized_data) const override;

  /// Create a FixedShapeTensorArray from ArrayData
  std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;

  /// \brief Create a FixedShapeTensorType instance
  static Result<std::shared_ptr<DataType>> Make(
      const std::shared_ptr<DataType>& value_type, const std::vector<int64_t>& shape,
      const std::vector<int64_t>& permutation = {},
      const std::vector<std::string>& dim_names = {});

 private:
  std::shared_ptr<DataType> storage_type_;
  std::shared_ptr<DataType> value_type_;
  std::vector<int64_t> shape_;
  std::vector<int64_t> strides_;
  std::vector<int64_t> permutation_;
  std::vector<std::string> dim_names_;
};

/// \brief Return a FixedShapeTensorType instance.
ARROW_EXPORT std::shared_ptr<DataType> fixed_shape_tensor(
    const std::shared_ptr<DataType>& storage_type, const std::vector<int64_t>& shape,
    const std::vector<int64_t>& permutation = {},
    const std::vector<std::string>& dim_names = {});

}  // namespace extension
}  // namespace arrow
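Illustrative usage (not part of the vendored header): a minimal sketch of declaring a fixed-shape tensor extension type whose elements are 2x3 float32 tensors, using the Make factory above. The helper name MakeTensorType is hypothetical.

#include "arrow/extension/fixed_shape_tensor.h"

arrow::Result<std::shared_ptr<arrow::DataType>> MakeTensorType() {
  // Storage is a fixed_size_list<float32>[6]; each element is a 2x3 tensor.
  return arrow::extension::FixedShapeTensorType::Make(
      arrow::float32(), /*shape=*/{2, 3});
}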
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/extension_type.h
ADDED
@@ -0,0 +1,165 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

/// User-defined extension types.
/// \since 0.13.0

#pragma once

#include <memory>
#include <string>

#include "arrow/array/array_base.h"
#include "arrow/array/data.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/type_fwd.h"
#include "arrow/util/checked_cast.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

/// \brief The base class for custom / user-defined types.
class ARROW_EXPORT ExtensionType : public DataType {
 public:
  static constexpr Type::type type_id = Type::EXTENSION;

  static constexpr const char* type_name() { return "extension"; }

  /// \brief The type of array used to represent this extension type's data
  const std::shared_ptr<DataType>& storage_type() const { return storage_type_; }

  /// \brief Return the type category of the storage type
  Type::type storage_id() const override { return storage_type_->id(); }

  DataTypeLayout layout() const override;

  std::string ToString() const override;

  std::string name() const override { return "extension"; }

  /// \brief Unique name of extension type used to identify type for
  /// serialization
  /// \return the string name of the extension
  virtual std::string extension_name() const = 0;

  /// \brief Determine if two instances of the same extension types are
  /// equal. Invoked from ExtensionType::Equals
  /// \param[in] other the type to compare this type with
  /// \return bool true if type instances are equal
  virtual bool ExtensionEquals(const ExtensionType& other) const = 0;

  /// \brief Wrap built-in Array type in a user-defined ExtensionArray instance
  /// \param[in] data the physical storage for the extension type
  virtual std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const = 0;

  /// \brief Create an instance of the ExtensionType given the actual storage
  /// type and the serialized representation
  /// \param[in] storage_type the physical storage type of the extension
  /// \param[in] serialized_data the serialized representation produced by
  /// Serialize
  virtual Result<std::shared_ptr<DataType>> Deserialize(
      std::shared_ptr<DataType> storage_type,
      const std::string& serialized_data) const = 0;

  /// \brief Create a serialized representation of the extension type's
  /// metadata. The storage type will be handled automatically in IPC code
  /// paths
  /// \return the serialized representation
  virtual std::string Serialize() const = 0;

  /// \brief Wrap the given storage array as an extension array
  static std::shared_ptr<Array> WrapArray(const std::shared_ptr<DataType>& ext_type,
                                          const std::shared_ptr<Array>& storage);

  /// \brief Wrap the given chunked storage array as a chunked extension array
  static std::shared_ptr<ChunkedArray> WrapArray(
      const std::shared_ptr<DataType>& ext_type,
      const std::shared_ptr<ChunkedArray>& storage);

 protected:
  explicit ExtensionType(std::shared_ptr<DataType> storage_type)
      : DataType(Type::EXTENSION), storage_type_(storage_type) {}

  std::shared_ptr<DataType> storage_type_;
};

/// \brief Base array class for user-defined extension types
class ARROW_EXPORT ExtensionArray : public Array {
 public:
  using TypeClass = ExtensionType;
  /// \brief Construct an ExtensionArray from an ArrayData.
  ///
  /// The ArrayData must have the right ExtensionType.
  explicit ExtensionArray(const std::shared_ptr<ArrayData>& data);

  /// \brief Construct an ExtensionArray from a type and the underlying storage.
  ExtensionArray(const std::shared_ptr<DataType>& type,
                 const std::shared_ptr<Array>& storage);

  const ExtensionType* extension_type() const {
    return internal::checked_cast<const ExtensionType*>(data_->type.get());
  }

  /// \brief The physical storage for the extension array
  const std::shared_ptr<Array>& storage() const { return storage_; }

 protected:
  void SetData(const std::shared_ptr<ArrayData>& data);
  std::shared_ptr<Array> storage_;
};

class ARROW_EXPORT ExtensionTypeRegistry {
 public:
  /// \brief Provide access to the global registry to allow code to control for
  /// race conditions in registry teardown when some types need to be
  /// unregistered and destroyed first
  static std::shared_ptr<ExtensionTypeRegistry> GetGlobalRegistry();

  virtual ~ExtensionTypeRegistry() = default;

  virtual Status RegisterType(std::shared_ptr<ExtensionType> type) = 0;
  virtual Status UnregisterType(const std::string& type_name) = 0;
  virtual std::shared_ptr<ExtensionType> GetType(const std::string& type_name) = 0;
};

/// \brief Register an extension type globally. The name returned by the type's
/// extension_name() method should be unique. This method is thread-safe
/// \param[in] type an instance of the extension type
/// \return Status
ARROW_EXPORT
Status RegisterExtensionType(std::shared_ptr<ExtensionType> type);

/// \brief Delete an extension type from the global registry. This method is
/// thread-safe
/// \param[in] type_name the unique name of a registered extension type
/// \return Status error if the type name is unknown
ARROW_EXPORT
Status UnregisterExtensionType(const std::string& type_name);

/// \brief Retrieve an extension type from the global registry. Returns nullptr
/// if not found. This method is thread-safe
/// \return the globally-registered extension type
ARROW_EXPORT
std::shared_ptr<ExtensionType> GetExtensionType(const std::string& type_name);

ARROW_EXPORT extern const char kExtensionTypeKeyName[];
ARROW_EXPORT extern const char kExtensionMetadataKeyName[];

}  // namespace arrow
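Illustrative usage (not part of the vendored header): a minimal sketch of a user-defined extension type and its registration. The names UuidType/UuidArray and the "example.uuid" identifier are assumptions; a real implementation would round-trip metadata through Serialize/Deserialize rather than returning fixed values.

#include <memory>
#include <string>
#include "arrow/extension_type.h"

class UuidArray : public arrow::ExtensionArray {
 public:
  using arrow::ExtensionArray::ExtensionArray;
};

class UuidType : public arrow::ExtensionType {
 public:
  // Storage is 16 raw bytes per value.
  UuidType() : arrow::ExtensionType(arrow::fixed_size_binary(16)) {}
  std::string extension_name() const override { return "example.uuid"; }
  bool ExtensionEquals(const arrow::ExtensionType& other) const override {
    return other.extension_name() == extension_name();
  }
  std::shared_ptr<arrow::Array> MakeArray(
      std::shared_ptr<arrow::ArrayData> data) const override {
    return std::make_shared<UuidArray>(data);
  }
  arrow::Result<std::shared_ptr<arrow::DataType>> Deserialize(
      std::shared_ptr<arrow::DataType> storage_type,
      const std::string& serialized) const override {
    return std::make_shared<UuidType>();  // no parameters to restore here
  }
  std::string Serialize() const override { return ""; }  // no metadata
};

// Registration makes the type discoverable by name, e.g. in IPC round trips.
arrow::Status RegisterUuid() {
  return arrow::RegisterExtensionType(std::make_shared<UuidType>());
}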
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool_test.h
ADDED
@@ -0,0 +1,110 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>

#include <gtest/gtest.h>

#include "arrow/memory_pool.h"
#include "arrow/status.h"
#include "arrow/testing/gtest_util.h"

namespace arrow {

class TestMemoryPoolBase : public ::testing::Test {
 public:
  virtual ::arrow::MemoryPool* memory_pool() = 0;

  void TestMemoryTracking() {
    auto pool = memory_pool();

    uint8_t* data;
    ASSERT_OK(pool->Allocate(100, &data));
    EXPECT_EQ(static_cast<uint64_t>(0), reinterpret_cast<uint64_t>(data) % 64);
    ASSERT_EQ(100, pool->bytes_allocated());

    uint8_t* data2;
    ASSERT_OK(pool->Allocate(27, &data2));
    EXPECT_EQ(static_cast<uint64_t>(0), reinterpret_cast<uint64_t>(data2) % 64);
    ASSERT_EQ(127, pool->bytes_allocated());

    pool->Free(data, 100);
    ASSERT_EQ(27, pool->bytes_allocated());
    pool->Free(data2, 27);
    ASSERT_EQ(0, pool->bytes_allocated());
  }

  void TestOOM() {
    auto pool = memory_pool();

    uint8_t* data;
    int64_t max_alloc = std::min<uint64_t>(std::numeric_limits<int64_t>::max(),
                                           std::numeric_limits<size_t>::max());
    // subtract 63 to prevent overflow after the size is aligned
    for (int64_t to_alloc : {max_alloc, max_alloc - 63, max_alloc - 127}) {
      ASSERT_RAISES(OutOfMemory, pool->Allocate(to_alloc, &data));
    }
  }

  void TestReallocate() {
    auto pool = memory_pool();

    uint8_t* data;
    ASSERT_OK(pool->Allocate(10, &data));
    ASSERT_EQ(10, pool->bytes_allocated());
    data[0] = 35;
    data[9] = 12;

    // Expand
    ASSERT_OK(pool->Reallocate(10, 20, &data));
    ASSERT_EQ(data[9], 12);
    ASSERT_EQ(20, pool->bytes_allocated());

    // Shrink
    ASSERT_OK(pool->Reallocate(20, 5, &data));
    ASSERT_EQ(data[0], 35);
    ASSERT_EQ(5, pool->bytes_allocated());

    // Free
    pool->Free(data, 5);
    ASSERT_EQ(0, pool->bytes_allocated());
  }

  void TestAlignment() {
    auto pool = memory_pool();
    {
      uint8_t* data64;
      ASSERT_OK(pool->Allocate(10, &data64));
      ASSERT_EQ(reinterpret_cast<uintptr_t>(data64) % kDefaultBufferAlignment, 0);
      pool->Free(data64, 10);
    }

    {
      uint8_t* data512;
      ASSERT_OK(pool->Allocate(10, 512, &data512));
      ASSERT_EQ(reinterpret_cast<uintptr_t>(data512) % 512, 0);
      pool->Free(data512, 10, 512);
    }
  }
};

}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/pch.h
ADDED
@@ -0,0 +1,30 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// Often-used headers, for precompiling.
// If updating this header, please make sure you check compilation speed
// before checking in. Adding headers which are not used extremely often
// may incur a slowdown, since it makes the precompiled header heavier to load.

#include "arrow/array.h"
#include "arrow/buffer.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/table.h"
#include "arrow/type.h"
#include "arrow/type_traits.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/pretty_print.h
ADDED
@@ -0,0 +1,157 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
#pragma once
|
19 |
+
|
20 |
+
#include <iosfwd>
|
21 |
+
#include <string>
|
22 |
+
#include <utility>
|
23 |
+
|
24 |
+
#include "arrow/util/visibility.h"
|
25 |
+
|
26 |
+
namespace arrow {
|
27 |
+
|
28 |
+
class Array;
|
29 |
+
class ChunkedArray;
|
30 |
+
class RecordBatch;
|
31 |
+
class Schema;
|
32 |
+
class Status;
|
33 |
+
class Table;
|
34 |
+
|
35 |
+
/// \class PrettyPrintDelimiters
|
36 |
+
/// \brief Options for controlling which delimiters to use when printing
|
37 |
+
/// an Array or ChunkedArray.
|
38 |
+
struct ARROW_EXPORT PrettyPrintDelimiters {
|
39 |
+
/// Delimiter to use when opening an Array or ChunkedArray (e.g. "[")
|
40 |
+
std::string open = "[";
|
41 |
+
|
42 |
+
/// Delimiter to use when closing an Array or ChunkedArray (e.g. "]")
|
43 |
+
  std::string close = "]";

  /// Delimiter for separating individual elements of an Array (e.g. ","),
  /// or individual chunks of a ChunkedArray
  std::string element = ",";

  /// Create a PrettyPrintDelimiters instance with default values
  static PrettyPrintDelimiters Defaults() { return PrettyPrintDelimiters(); }
};

/// \class PrettyPrintOptions
/// \brief Options for controlling how various Arrow types should be printed.
struct ARROW_EXPORT PrettyPrintOptions {
  PrettyPrintOptions() = default;

  PrettyPrintOptions(int indent,  // NOLINT runtime/explicit
                     int window = 10, int indent_size = 2, std::string null_rep = "null",
                     bool skip_new_lines = false, bool truncate_metadata = true,
                     int container_window = 2)
      : indent(indent),
        indent_size(indent_size),
        window(window),
        container_window(container_window),
        null_rep(std::move(null_rep)),
        skip_new_lines(skip_new_lines),
        truncate_metadata(truncate_metadata) {}

  /// Create a PrettyPrintOptions instance with default values
  static PrettyPrintOptions Defaults() { return PrettyPrintOptions(); }

  /// Number of spaces to shift entire formatted object to the right
  int indent = 0;

  /// Size of internal indents
  int indent_size = 2;

  /// Maximum number of elements to show at the beginning and at the end.
  int window = 10;

  /// Maximum number of elements to show at the beginning and at the end, for elements
  /// that are containers (that is, list in ListArray and chunks in ChunkedArray)
  int container_window = 2;

  /// String to use for representing a null value, defaults to "null"
  std::string null_rep = "null";

  /// Skip new lines between elements, defaults to false
  bool skip_new_lines = false;

  /// Limit display of each KeyValueMetadata key/value pair to a single line at
  /// 80 character width
  bool truncate_metadata = true;

  /// If true, display field metadata when pretty-printing a Schema
  bool show_field_metadata = true;

  /// If true, display schema metadata when pretty-printing a Schema
  bool show_schema_metadata = true;

  /// Delimiters to use when printing an Array
  PrettyPrintDelimiters array_delimiters = PrettyPrintDelimiters::Defaults();

  /// Delimiters to use when printing a ChunkedArray
  PrettyPrintDelimiters chunked_array_delimiters = PrettyPrintDelimiters::Defaults();
};

/// \brief Print human-readable representation of RecordBatch
ARROW_EXPORT
Status PrettyPrint(const RecordBatch& batch, int indent, std::ostream* sink);

ARROW_EXPORT
Status PrettyPrint(const RecordBatch& batch, const PrettyPrintOptions& options,
                   std::ostream* sink);

/// \brief Print human-readable representation of Table
ARROW_EXPORT
Status PrettyPrint(const Table& table, const PrettyPrintOptions& options,
                   std::ostream* sink);

/// \brief Print human-readable representation of Array
ARROW_EXPORT
Status PrettyPrint(const Array& arr, int indent, std::ostream* sink);

/// \brief Print human-readable representation of Array
ARROW_EXPORT
Status PrettyPrint(const Array& arr, const PrettyPrintOptions& options,
                   std::ostream* sink);

/// \brief Print human-readable representation of Array
ARROW_EXPORT
Status PrettyPrint(const Array& arr, const PrettyPrintOptions& options,
                   std::string* result);

/// \brief Print human-readable representation of ChunkedArray
ARROW_EXPORT
Status PrettyPrint(const ChunkedArray& chunked_arr, const PrettyPrintOptions& options,
                   std::ostream* sink);

/// \brief Print human-readable representation of ChunkedArray
ARROW_EXPORT
Status PrettyPrint(const ChunkedArray& chunked_arr, const PrettyPrintOptions& options,
                   std::string* result);

ARROW_EXPORT
Status PrettyPrint(const Schema& schema, const PrettyPrintOptions& options,
                   std::ostream* sink);

ARROW_EXPORT
Status PrettyPrint(const Schema& schema, const PrettyPrintOptions& options,
                   std::string* result);

ARROW_EXPORT
Status DebugPrint(const Array& arr, int indent);

}  // namespace arrow
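A minimal usage sketch for the PrettyPrint API declared above, assuming the usual builder APIs from arrow/api.h; the array contents and option values are illustrative, not part of the header:

#include <iostream>
#include "arrow/api.h"
#include "arrow/pretty_print.h"

arrow::Status PrettyPrintExample() {
  // Build a small Int64 array with one null (illustrative data).
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  std::shared_ptr<arrow::Array> array;
  ARROW_RETURN_NOT_OK(builder.Finish(&array));

  arrow::PrettyPrintOptions options = arrow::PrettyPrintOptions::Defaults();
  options.window = 2;       // show at most 2 elements at each end
  options.null_rep = "NA";  // render nulls as "NA" instead of "null"

  // Use the string-returning overload declared above.
  std::string result;
  ARROW_RETURN_NOT_OK(arrow::PrettyPrint(*array, options, &result));
  std::cout << result << std::endl;
  return arrow::Status::OK();
}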
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/api.h
ADDED
@@ -0,0 +1,30 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "arrow/python/arrow_to_pandas.h"
#include "arrow/python/common.h"
#include "arrow/python/datetime.h"
#include "arrow/python/deserialize.h"
#include "arrow/python/helpers.h"
#include "arrow/python/inference.h"
#include "arrow/python/io.h"
#include "arrow/python/numpy_convert.h"
#include "arrow/python/numpy_to_arrow.h"
#include "arrow/python/python_to_arrow.h"
#include "arrow/python/serialize.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h
ADDED
@@ -0,0 +1,60 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <utility>

#include "arrow/python/common.h"
#include "arrow/status.h"
#include "arrow/util/future.h"

namespace arrow::py {

/// \brief Bind a Python callback to an arrow::Future.
///
/// If the Future finishes successfully, py_wrapper is called with its
/// result value and should return a PyObject*. If py_wrapper is successful,
/// py_cb is called with its return value.
///
/// If either the Future or py_wrapper fails, py_cb is called with the
/// associated Python exception.
///
/// \param future The future to bind to.
/// \param py_cb The Python callback function. Will be passed the result of
///   py_wrapper, or a Python exception if the future failed or one was
///   raised by py_wrapper.
/// \param py_wrapper A function (likely defined in Cython) to convert the C++
///   result of the future to a Python object.
template <typename T, typename PyWrapper = PyObject* (*)(T)>
void BindFuture(Future<T> future, PyObject* py_cb, PyWrapper py_wrapper) {
  Py_INCREF(py_cb);
  OwnedRefNoGIL cb_ref(py_cb);

  auto future_cb = [cb_ref = std::move(cb_ref),
                    py_wrapper = std::move(py_wrapper)](Result<T> result) {
    SafeCallIntoPythonVoid([&]() {
      OwnedRef py_value_or_exc{WrapResult(std::move(result), std::move(py_wrapper))};
      Py_XDECREF(
          PyObject_CallFunctionObjArgs(cb_ref.obj(), py_value_or_exc.obj(), NULLPTR));
      ARROW_WARN_NOT_OK(CheckPyError(), "Internal error in async call");
    });
  };
  future.AddCallback(std::move(future_cb));
}

}  // namespace arrow::py
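A sketch of how BindFuture might be called from C++ glue code, assuming a Future<int64_t> and a Python callable py_cb; the wrapper lambda converting the C++ value to a Python int is hypothetical, not part of the header:

#include "arrow/python/async.h"

void BindExample(arrow::Future<int64_t> fut, PyObject* py_cb) {
  // py_wrapper runs with the GIL held (inside SafeCallIntoPythonVoid), so
  // calling into the CPython C API here is safe.
  arrow::py::BindFuture(std::move(fut), py_cb,
                        [](int64_t value) { return PyLong_FromLongLong(value); });
}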
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h
ADDED
@@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "arrow/python/platform.h"

#include "arrow/python/visibility.h"

namespace arrow {
namespace py {
namespace benchmark {

// Micro-benchmark routines for use from ASV

// Run PandasObjectIsNull() once over every object in *list*
ARROW_PYTHON_EXPORT
void Benchmark_PandasObjectIsNull(PyObject* list);

}  // namespace benchmark
}  // namespace py
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/common.h
ADDED
@@ -0,0 +1,458 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <functional>
#include <memory>
#include <optional>
#include <utility>

#include "arrow/buffer.h"
#include "arrow/python/pyarrow.h"
#include "arrow/python/visibility.h"
#include "arrow/result.h"
#include "arrow/util/macros.h"

namespace arrow {

class MemoryPool;
template <class T>
class Result;

namespace py {

// Convert current Python error to a Status.  The Python error state is cleared
// and can be restored with RestorePyError().
ARROW_PYTHON_EXPORT Status ConvertPyError(StatusCode code = StatusCode::UnknownError);
// Query whether the given Status is a Python error (as wrapped by ConvertPyError()).
ARROW_PYTHON_EXPORT bool IsPyError(const Status& status);
// Restore a Python error wrapped in a Status.
ARROW_PYTHON_EXPORT void RestorePyError(const Status& status);

// Catch a pending Python exception and return the corresponding Status.
// If no exception is pending, Status::OK() is returned.
inline Status CheckPyError(StatusCode code = StatusCode::UnknownError) {
  if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
    return Status::OK();
  } else {
    return ConvertPyError(code);
  }
}

#define RETURN_IF_PYERROR() ARROW_RETURN_NOT_OK(CheckPyError())

#define PY_RETURN_IF_ERROR(CODE) ARROW_RETURN_NOT_OK(CheckPyError(CODE))

// For Cython, as you can't define template C++ functions in Cython, only use them.
// This function can set a Python exception.  It assumes that T has a (cheap)
// default constructor.
template <class T>
T GetResultValue(Result<T> result) {
  if (ARROW_PREDICT_TRUE(result.ok())) {
    return *std::move(result);
  } else {
    int r = internal::check_status(result.status());  // takes the GIL
    assert(r == -1);  // should have errored out
    ARROW_UNUSED(r);
    return {};
  }
}

/// \brief Wrap a Result and return the corresponding Python object.
///
/// If the Result is successful, py_wrapper is called with its result value
/// and should return a PyObject*. If py_wrapper is successful (returns
/// a non-NULL value), its return value is returned.
///
/// If either the Result or py_wrapper fails, the associated Python exception
/// is raised and NULL is returned.
///
/// \param result The Result whose value to wrap in a Python object.
/// \param py_wrapper A function (likely defined in Cython) to convert the C++
///   value of the Result to a Python object.
/// \return A new Python reference, or NULL if an exception occurred
template <typename T, typename PyWrapper = PyObject* (*)(T)>
PyObject* WrapResult(Result<T> result, PyWrapper&& py_wrapper) {
  static_assert(std::is_same_v<PyObject*, decltype(py_wrapper(std::declval<T>()))>,
                "PyWrapper argument to WrapResult should return a PyObject* "
                "when called with a T*");
  Status st = result.status();
  if (st.ok()) {
    PyObject* py_value = py_wrapper(result.MoveValueUnsafe());
    st = CheckPyError();
    if (st.ok()) {
      return py_value;
    }
    Py_XDECREF(py_value);  // should be null, but who knows
  }
  // Status is an error, convert it to an exception.
  return internal::convert_status(st);
}

// A RAII-style helper that ensures the GIL is acquired inside a lexical block.
class ARROW_PYTHON_EXPORT PyAcquireGIL {
 public:
  PyAcquireGIL() : acquired_gil_(false) { acquire(); }

  ~PyAcquireGIL() { release(); }

  void acquire() {
    if (!acquired_gil_) {
      state_ = PyGILState_Ensure();
      acquired_gil_ = true;
    }
  }

  // idempotent
  void release() {
    if (acquired_gil_) {
      PyGILState_Release(state_);
      acquired_gil_ = false;
    }
  }

 private:
  bool acquired_gil_;
  PyGILState_STATE state_;
  ARROW_DISALLOW_COPY_AND_ASSIGN(PyAcquireGIL);
};

// A RAII-style helper that releases the GIL until the end of a lexical block
class ARROW_PYTHON_EXPORT PyReleaseGIL {
 public:
  PyReleaseGIL() : ptr_(PyEval_SaveThread(), &unique_ptr_deleter) {}

 private:
  static void unique_ptr_deleter(PyThreadState* state) {
    if (state) {
      PyEval_RestoreThread(state);
    }
  }
  std::unique_ptr<PyThreadState, decltype(&unique_ptr_deleter)> ptr_;
};

// A helper to call safely into the Python interpreter from arbitrary C++ code.
// The GIL is acquired, and the current thread's error status is preserved.
template <typename Function>
auto SafeCallIntoPython(Function&& func) -> decltype(func()) {
  PyAcquireGIL lock;
  PyObject* exc_type;
  PyObject* exc_value;
  PyObject* exc_traceback;
  PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
  auto maybe_status = std::forward<Function>(func)();
  // If the return Status is a "Python error", the current Python error status
  // describes the error and shouldn't be clobbered.
  if (!IsPyError(::arrow::internal::GenericToStatus(maybe_status)) &&
      exc_type != NULLPTR) {
    PyErr_Restore(exc_type, exc_value, exc_traceback);
  }
  return maybe_status;
}

template <typename Function>
auto SafeCallIntoPythonVoid(Function&& func) -> decltype(func()) {
  PyAcquireGIL lock;
  PyObject* exc_type;
  PyObject* exc_value;
  PyObject* exc_traceback;
  PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
  func();
  if (exc_type != NULLPTR) {
    PyErr_Restore(exc_type, exc_value, exc_traceback);
  }
}

// A RAII primitive that DECREFs the underlying PyObject* when it
// goes out of scope.
class ARROW_PYTHON_EXPORT OwnedRef {
 public:
  OwnedRef() : obj_(NULLPTR) {}
  OwnedRef(OwnedRef&& other) : OwnedRef(other.detach()) {}
  explicit OwnedRef(PyObject* obj) : obj_(obj) {}

  OwnedRef& operator=(OwnedRef&& other) {
    obj_ = other.detach();
    return *this;
  }

  ~OwnedRef() {
    // GH-38626: destructor may be called after the Python interpreter is finalized.
    if (Py_IsInitialized()) {
      reset();
    }
  }

  void reset(PyObject* obj) {
    Py_XDECREF(obj_);
    obj_ = obj;
  }

  void reset() { reset(NULLPTR); }

  PyObject* detach() {
    PyObject* result = obj_;
    obj_ = NULLPTR;
    return result;
  }

  PyObject* obj() const { return obj_; }

  PyObject** ref() { return &obj_; }

  operator bool() const { return obj_ != NULLPTR; }

 private:
  ARROW_DISALLOW_COPY_AND_ASSIGN(OwnedRef);

  PyObject* obj_;
};

// Same as OwnedRef, but ensures the GIL is taken when it goes out of scope.
// This is for situations where the GIL is not always known to be held
// (e.g. if it is released in the middle of a function for performance reasons)
class ARROW_PYTHON_EXPORT OwnedRefNoGIL : public OwnedRef {
 public:
  OwnedRefNoGIL() : OwnedRef() {}
  OwnedRefNoGIL(OwnedRefNoGIL&& other) : OwnedRef(other.detach()) {}
  explicit OwnedRefNoGIL(PyObject* obj) : OwnedRef(obj) {}

  ~OwnedRefNoGIL() {
    // GH-38626: destructor may be called after the Python interpreter is finalized.
    if (Py_IsInitialized() && obj() != NULLPTR) {
      PyAcquireGIL lock;
      reset();
    }
  }
};

template <template <typename...> typename SmartPtr, typename... Ts>
class SmartPtrNoGIL : public SmartPtr<Ts...> {
  using Base = SmartPtr<Ts...>;

 public:
  template <typename... Args>
  SmartPtrNoGIL(Args&&... args) : Base(std::forward<Args>(args)...) {}

  ~SmartPtrNoGIL() { reset(); }

  template <typename... Args>
  void reset(Args&&... args) {
    auto release_guard = optional_gil_release();
    Base::reset(std::forward<Args>(args)...);
  }

  template <typename V>
  SmartPtrNoGIL& operator=(V&& v) {
    auto release_guard = optional_gil_release();
    Base::operator=(std::forward<V>(v));
    return *this;
  }

 private:
  // Only release the GIL if we own an object *and* the Python runtime is
  // valid *and* the GIL is held.
  std::optional<PyReleaseGIL> optional_gil_release() const {
    if (this->get() != nullptr && Py_IsInitialized() && PyGILState_Check()) {
      return PyReleaseGIL();
    }
    return {};
  }
};

/// \brief A std::shared_ptr<T, ...> subclass that releases the GIL when destroying T
template <typename... Ts>
using SharedPtrNoGIL = SmartPtrNoGIL<std::shared_ptr, Ts...>;

/// \brief A std::unique_ptr<T, ...> subclass that releases the GIL when destroying T
template <typename... Ts>
using UniquePtrNoGIL = SmartPtrNoGIL<std::unique_ptr, Ts...>;

template <typename Fn>
struct BoundFunction;

template <typename... Args>
struct BoundFunction<void(PyObject*, Args...)> {
  // We bind `cdef void fn(object, ...)` to get a `Status(...)`
  // where the Status contains any Python error raised by `fn`
  using Unbound = void(PyObject*, Args...);
  using Bound = Status(Args...);

  BoundFunction(Unbound* unbound, PyObject* bound_arg)
      : unbound_(unbound), bound_arg_(bound_arg) {}

  Status Invoke(Args... args) const {
    PyAcquireGIL lock;
    unbound_(bound_arg_.obj(), std::forward<Args>(args)...);
    RETURN_IF_PYERROR();
    return Status::OK();
  }

  Unbound* unbound_;
  OwnedRefNoGIL bound_arg_;
};

template <typename Return, typename... Args>
struct BoundFunction<Return(PyObject*, Args...)> {
  // We bind `cdef Return fn(object, ...)` to get a `Result<Return>(...)`
  // where the Result contains any Python error raised by `fn` or the
  // return value from `fn`.
  using Unbound = Return(PyObject*, Args...);
  using Bound = Result<Return>(Args...);

  BoundFunction(Unbound* unbound, PyObject* bound_arg)
      : unbound_(unbound), bound_arg_(bound_arg) {}

  Result<Return> Invoke(Args... args) const {
    PyAcquireGIL lock;
    Return ret = unbound_(bound_arg_.obj(), std::forward<Args>(args)...);
    RETURN_IF_PYERROR();
    return ret;
  }

  Unbound* unbound_;
  OwnedRefNoGIL bound_arg_;
};

template <typename OutFn, typename Return, typename... Args>
std::function<OutFn> BindFunction(Return (*unbound)(PyObject*, Args...),
                                  PyObject* bound_arg) {
  using Fn = BoundFunction<Return(PyObject*, Args...)>;

  static_assert(std::is_same<typename Fn::Bound, OutFn>::value,
                "requested bound function of unsupported type");

  Py_XINCREF(bound_arg);
  auto bound_fn = std::make_shared<Fn>(unbound, bound_arg);
  return
      [bound_fn](Args... args) { return bound_fn->Invoke(std::forward<Args>(args)...); };
}

// A temporary conversion of a Python object to a bytes area.
struct PyBytesView {
  const char* bytes;
  Py_ssize_t size;
  bool is_utf8;

  static Result<PyBytesView> FromString(PyObject* obj, bool check_utf8 = false) {
    PyBytesView self;
    ARROW_RETURN_NOT_OK(self.ParseString(obj, check_utf8));
    return std::move(self);
  }

  static Result<PyBytesView> FromUnicode(PyObject* obj) {
    PyBytesView self;
    ARROW_RETURN_NOT_OK(self.ParseUnicode(obj));
    return std::move(self);
  }

  static Result<PyBytesView> FromBinary(PyObject* obj) {
    PyBytesView self;
    ARROW_RETURN_NOT_OK(self.ParseBinary(obj));
    return std::move(self);
  }

  // View the given Python object as string-like, i.e. str or (utf8) bytes
  Status ParseString(PyObject* obj, bool check_utf8 = false) {
    if (PyUnicode_Check(obj)) {
      return ParseUnicode(obj);
    } else {
      ARROW_RETURN_NOT_OK(ParseBinary(obj));
      if (check_utf8) {
        // Check the bytes are valid utf-8
        OwnedRef decoded(PyUnicode_FromStringAndSize(bytes, size));
        if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
          is_utf8 = true;
        } else {
          PyErr_Clear();
          is_utf8 = false;
        }
      }
      return Status::OK();
    }
  }

  // View the given Python object as unicode string
  Status ParseUnicode(PyObject* obj) {
    // The utf-8 representation is cached on the unicode object
    bytes = PyUnicode_AsUTF8AndSize(obj, &size);
    RETURN_IF_PYERROR();
    is_utf8 = true;
    return Status::OK();
  }

  // View the given Python object as binary-like, i.e. bytes
  Status ParseBinary(PyObject* obj) {
    if (PyBytes_Check(obj)) {
      bytes = PyBytes_AS_STRING(obj);
      size = PyBytes_GET_SIZE(obj);
      is_utf8 = false;
    } else if (PyByteArray_Check(obj)) {
      bytes = PyByteArray_AS_STRING(obj);
      size = PyByteArray_GET_SIZE(obj);
      is_utf8 = false;
    } else if (PyMemoryView_Check(obj)) {
      PyObject* ref = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
      RETURN_IF_PYERROR();
      Py_buffer* buffer = PyMemoryView_GET_BUFFER(ref);
      bytes = reinterpret_cast<const char*>(buffer->buf);
      size = buffer->len;
      is_utf8 = false;
    } else {
      return Status::TypeError("Expected bytes, got a '", Py_TYPE(obj)->tp_name,
                               "' object");
    }
    return Status::OK();
  }

 protected:
  OwnedRef ref;
};

class ARROW_PYTHON_EXPORT PyBuffer : public Buffer {
 public:
  /// While memoryview objects support multi-dimensional buffers, PyBuffer only supports
  /// one-dimensional byte buffers.
  ~PyBuffer();

  static Result<std::shared_ptr<Buffer>> FromPyObject(PyObject* obj);

 private:
  PyBuffer();
  Status Init(PyObject*);

  Py_buffer py_buf_;
};

// Return the common PyArrow memory pool
ARROW_PYTHON_EXPORT void set_default_memory_pool(MemoryPool* pool);
ARROW_PYTHON_EXPORT MemoryPool* get_memory_pool();

// This is annoying: because C++11 does not allow implicit conversion of string
// literals to non-const char*, we need to go through some gymnastics to use
// PyObject_CallMethod without a lot of pain (its arguments are non-const
// char*)
template <typename... ArgTypes>
static inline PyObject* cpp_PyObject_CallMethod(PyObject* obj, const char* method_name,
                                                const char* argspec, ArgTypes... args) {
  return PyObject_CallMethod(obj, const_cast<char*>(method_name),
                             const_cast<char*>(argspec), args...);
}

}  // namespace py
}  // namespace arrow
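A sketch of how these primitives compose when calling back into Python from C++ that may not hold the GIL; the method name "callback" is hypothetical:

#include "arrow/python/common.h"

arrow::Status CallPythonHook(PyObject* handler) {
  // SafeCallIntoPython acquires the GIL and preserves any pending Python
  // error around the call.
  return arrow::py::SafeCallIntoPython([&]() -> arrow::Status {
    // OwnedRef DECREFs the call result when it goes out of scope.
    arrow::py::OwnedRef result(
        arrow::py::cpp_PyObject_CallMethod(handler, "callback", "()"));
    RETURN_IF_PYERROR();
    return arrow::Status::OK();
  });
}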
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h
ADDED
@@ -0,0 +1,42 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include "arrow/csv/options.h"
#include "arrow/python/common.h"
#include "arrow/util/macros.h"

namespace arrow {
namespace py {
namespace csv {

using PyInvalidRowCallback = std::function<::arrow::csv::InvalidRowResult(
    PyObject*, const ::arrow::csv::InvalidRow&)>;

ARROW_PYTHON_EXPORT
::arrow::csv::InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback,
                                                      PyObject* handler);

}  // namespace csv
}  // namespace py
}  // namespace arrow
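A sketch of wiring the handler into the CSV reader, assuming arrow::csv::ParseOptions and its invalid_row_handler member from arrow/csv/options.h:

#include "arrow/python/csv.h"

arrow::csv::ParseOptions WithPyHandler(PyObject* py_handler,
                                       arrow::py::csv::PyInvalidRowCallback cb) {
  auto options = arrow::csv::ParseOptions::Defaults();
  // Bind the Python handler object into the C++ handler slot.
  options.invalid_row_handler =
      arrow::py::csv::MakeInvalidRowHandler(std::move(cb), py_handler);
  return options;
}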
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/datetime.h
ADDED
@@ -0,0 +1,231 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <algorithm>
#include <chrono>

#include "arrow/python/platform.h"
#include "arrow/python/visibility.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/type_fwd.h"
#include "arrow/util/int_util_overflow.h"
#include "arrow/util/logging.h"

// By default, PyDateTimeAPI is a *static* variable. This forces
// PyDateTime_IMPORT to be called in every C/C++ module using the
// C datetime API. This is error-prone and potentially costly.
// Instead, we redefine PyDateTimeAPI to point to a global variable,
// which is initialized once by calling InitDatetime().
#ifdef PYPY_VERSION
#include "datetime.h"
#else
#define PyDateTimeAPI ::arrow::py::internal::datetime_api
#endif

namespace arrow {
using internal::AddWithOverflow;
using internal::MultiplyWithOverflow;
namespace py {
namespace internal {

#ifndef PYPY_VERSION
extern PyDateTime_CAPI* datetime_api;

ARROW_PYTHON_EXPORT
void InitDatetime();
#endif

// Returns the MonthDayNano namedtuple type (increments the reference count).
ARROW_PYTHON_EXPORT
PyObject* NewMonthDayNanoTupleType();

ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_us(PyObject* pytime) {
  return (PyDateTime_TIME_GET_HOUR(pytime) * 3600000000LL +
          PyDateTime_TIME_GET_MINUTE(pytime) * 60000000LL +
          PyDateTime_TIME_GET_SECOND(pytime) * 1000000LL +
          PyDateTime_TIME_GET_MICROSECOND(pytime));
}

ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_s(PyObject* pytime) { return PyTime_to_us(pytime) / 1000000; }

ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_ms(PyObject* pytime) { return PyTime_to_us(pytime) / 1000; }

ARROW_PYTHON_EXPORT
inline int64_t PyTime_to_ns(PyObject* pytime) { return PyTime_to_us(pytime) * 1000; }

ARROW_PYTHON_EXPORT
Status PyTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);

ARROW_PYTHON_EXPORT
Status PyDate_from_int(int64_t val, const DateUnit unit, PyObject** out);

// WARNING: This function returns a naive datetime.
ARROW_PYTHON_EXPORT
Status PyDateTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);

// This declaration must be the same as in filesystem/filesystem.h
using TimePoint =
    std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;

ARROW_PYTHON_EXPORT
int64_t PyDate_to_days(PyDateTime_Date* pydate);

ARROW_PYTHON_EXPORT
inline int64_t PyDate_to_s(PyDateTime_Date* pydate) {
  return PyDate_to_days(pydate) * 86400LL;
}

ARROW_PYTHON_EXPORT
inline int64_t PyDate_to_ms(PyDateTime_Date* pydate) {
  return PyDate_to_days(pydate) * 86400000LL;
}

ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_s(PyDateTime_DateTime* pydatetime) {
  return (PyDate_to_s(reinterpret_cast<PyDateTime_Date*>(pydatetime)) +
          PyDateTime_DATE_GET_HOUR(pydatetime) * 3600LL +
          PyDateTime_DATE_GET_MINUTE(pydatetime) * 60LL +
          PyDateTime_DATE_GET_SECOND(pydatetime));
}

ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_ms(PyDateTime_DateTime* pydatetime) {
  return (PyDateTime_to_s(pydatetime) * 1000LL +
          PyDateTime_DATE_GET_MICROSECOND(pydatetime) / 1000);
}

ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_us(PyDateTime_DateTime* pydatetime) {
  return (PyDateTime_to_s(pydatetime) * 1000000LL +
          PyDateTime_DATE_GET_MICROSECOND(pydatetime));
}

ARROW_PYTHON_EXPORT
inline int64_t PyDateTime_to_ns(PyDateTime_DateTime* pydatetime) {
  return PyDateTime_to_us(pydatetime) * 1000LL;
}

ARROW_PYTHON_EXPORT
inline TimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime) {
  return TimePoint(TimePoint::duration(PyDateTime_to_ns(pydatetime)));
}

ARROW_PYTHON_EXPORT
inline int64_t TimePoint_to_ns(TimePoint val) { return val.time_since_epoch().count(); }

ARROW_PYTHON_EXPORT
inline TimePoint TimePoint_from_s(double val) {
  return TimePoint(TimePoint::duration(static_cast<int64_t>(1e9 * val)));
}

ARROW_PYTHON_EXPORT
inline TimePoint TimePoint_from_ns(int64_t val) {
  return TimePoint(TimePoint::duration(val));
}

ARROW_PYTHON_EXPORT
inline int64_t PyDelta_to_s(PyDateTime_Delta* pytimedelta) {
  return (PyDateTime_DELTA_GET_DAYS(pytimedelta) * 86400LL +
          PyDateTime_DELTA_GET_SECONDS(pytimedelta));
}

ARROW_PYTHON_EXPORT
inline int64_t PyDelta_to_ms(PyDateTime_Delta* pytimedelta) {
  return (PyDelta_to_s(pytimedelta) * 1000LL +
          PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta) / 1000);
}

ARROW_PYTHON_EXPORT
inline Result<int64_t> PyDelta_to_us(PyDateTime_Delta* pytimedelta) {
  int64_t result = PyDelta_to_s(pytimedelta);
  if (MultiplyWithOverflow(result, 1000000LL, &result)) {
    return Status::Invalid("Timedelta too large to fit in 64-bit integer");
  }
  if (AddWithOverflow(result, PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta), &result)) {
    return Status::Invalid("Timedelta too large to fit in 64-bit integer");
  }
  return result;
}

ARROW_PYTHON_EXPORT
inline Result<int64_t> PyDelta_to_ns(PyDateTime_Delta* pytimedelta) {
  ARROW_ASSIGN_OR_RAISE(int64_t result, PyDelta_to_us(pytimedelta));
  if (MultiplyWithOverflow(result, 1000LL, &result)) {
    return Status::Invalid("Timedelta too large to fit in 64-bit integer");
  }
  return result;
}

ARROW_PYTHON_EXPORT
Result<int64_t> PyDateTime_utcoffset_s(PyObject* pydatetime);

/// \brief Convert a time zone name into a time zone object.
///
/// Supported input strings are:
/// * As used in the Olson time zone database (the "tz database" or
///   "tzdata"), such as "America/New_York"
/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
/// GIL must be held when calling this method.
ARROW_PYTHON_EXPORT
Result<PyObject*> StringToTzinfo(const std::string& tz);

/// \brief Convert a time zone object to a string representation.
///
/// The output strings are:
/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
///   if the input object is either an instance of pytz._FixedOffset or
///   datetime.timedelta
/// * The timezone's name if the input object's tzname() method returns with a
///   non-empty timezone name such as "UTC" or "America/New_York"
///
/// GIL must be held when calling this method.
ARROW_PYTHON_EXPORT
Result<std::string> TzinfoToString(PyObject* pytzinfo);

/// \brief Convert MonthDayNano to a python namedtuple.
///
/// Return a named tuple (pyarrow.MonthDayNano) containing attributes
/// "months", "days", "nanoseconds" in the given order
/// with values extracted from the fields on interval.
///
/// GIL must be held when calling this method.
ARROW_PYTHON_EXPORT
PyObject* MonthDayNanoIntervalToNamedTuple(
    const MonthDayNanoIntervalType::MonthDayNanos& interval);

/// \brief Convert the given Array to a PyList object containing
/// pyarrow.MonthDayNano objects.
ARROW_PYTHON_EXPORT
Result<PyObject*> MonthDayNanoIntervalArrayToPyList(
    const MonthDayNanoIntervalArray& array);

/// \brief Convert the Scalar object to a pyarrow.MonthDayNano (or None if
/// it isn't valid).
ARROW_PYTHON_EXPORT
Result<PyObject*> MonthDayNanoIntervalScalarToPyObject(
    const MonthDayNanoIntervalScalar& scalar);

}  // namespace internal
}  // namespace py
}  // namespace arrow
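The conversion helpers above are plain unit arithmetic; for example, PyDate_to_ms scales days-since-epoch by 86400 * 1000 = 86400000 ms per day. A minimal sketch, assuming InitDatetime() has already been called so the datetime C API pointer is initialized:

#include "arrow/python/datetime.h"

int64_t DateToMillis(PyObject* py_date) {
  // Equivalent to PyDate_to_days(py_date) * 24 * 60 * 60 * 1000.
  return arrow::py::internal::PyDate_to_ms(
      reinterpret_cast<PyDateTime_Date*>(py_date));
}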
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h
ADDED
@@ -0,0 +1,106 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <vector>

#include "arrow/python/serialize.h"
#include "arrow/python/visibility.h"
#include "arrow/status.h"

namespace arrow {

class RecordBatch;
class Tensor;

namespace io {

class RandomAccessFile;

}  // namespace io

namespace py {

struct ARROW_PYTHON_EXPORT SparseTensorCounts {
  int coo;
  int csr;
  int csc;
  int csf;
  int ndim_csf;

  int num_total_tensors() const { return coo + csr + csc + csf; }
  int num_total_buffers() const {
    return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf;
  }
};

/// \brief Read serialized Python sequence from file interface using Arrow IPC
/// \param[in] src a RandomAccessFile
/// \param[out] out the reconstructed data
/// \return Status
ARROW_PYTHON_EXPORT
Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out);

/// \brief Reconstruct SerializedPyObject from representation produced by
/// SerializedPyObject::GetComponents.
///
/// \param[in] num_tensors number of tensors in the object
/// \param[in] num_sparse_tensors number of sparse tensors in the object
/// \param[in] num_ndarrays number of numpy Ndarrays in the object
/// \param[in] num_buffers number of buffers in the object
/// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 +
/// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 +
/// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length
/// \param[out] out the reconstructed object
/// \return Status
ARROW_PYTHON_EXPORT
Status GetSerializedFromComponents(int num_tensors,
                                   const SparseTensorCounts& num_sparse_tensors,
                                   int num_ndarrays, int num_buffers, PyObject* data,
                                   SerializedPyObject* out);

/// \brief Reconstruct Python object from Arrow-serialized representation
/// \param[in] context Serialization context which contains custom serialization
/// and deserialization callbacks. Can be any Python object with a
/// _serialize_callback method for serialization and a _deserialize_callback
/// method for deserialization. If context is None, no custom serialization
/// will be attempted.
/// \param[in] object Object to deserialize
/// \param[in] base a Python object holding the underlying data that any NumPy
/// arrays will reference, to avoid premature deallocation
/// \param[out] out The returned object
/// \return Status
/// This acquires the GIL
ARROW_PYTHON_EXPORT
Status DeserializeObject(PyObject* context, const SerializedPyObject& object,
                         PyObject* base, PyObject** out);

/// \brief Reconstruct Ndarray from Arrow-serialized representation
/// \param[in] object Object to deserialize
/// \param[out] out The deserialized tensor
/// \return Status
ARROW_PYTHON_EXPORT
Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr<Tensor>* out);

ARROW_PYTHON_EXPORT
Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out);

}  // namespace py
}  // namespace arrow
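A worked instance of the SparseTensorCounts buffer arithmetic above: with 1 COO, 2 CSR, 0 CSC and 1 CSF tensor whose dimensions sum to 3, num_total_buffers() is 1*3 + 2*4 + 0*4 + 2*3 + 1 = 18 (the counts are illustrative):

#include <cassert>
#include "arrow/python/deserialize.h"

void BufferCountExample() {
  arrow::py::SparseTensorCounts counts{/*coo=*/1, /*csr=*/2, /*csc=*/0,
                                       /*csf=*/1, /*ndim_csf=*/3};
  assert(counts.num_total_tensors() == 4);   // 1 + 2 + 0 + 1
  assert(counts.num_total_buffers() == 18);  // 3 + 8 + 0 + 6 + 1
}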
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/filesystem.h
ADDED
@@ -0,0 +1,126 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/filesystem/filesystem.h"
#include "arrow/python/common.h"
#include "arrow/python/visibility.h"
#include "arrow/util/macros.h"

namespace arrow {
namespace py {
namespace fs {

class ARROW_PYTHON_EXPORT PyFileSystemVtable {
 public:
  std::function<void(PyObject*, std::string* out)> get_type_name;
  std::function<bool(PyObject*, const arrow::fs::FileSystem& other)> equals;

  std::function<void(PyObject*, const std::string& path, arrow::fs::FileInfo* out)>
      get_file_info;
  std::function<void(PyObject*, const std::vector<std::string>& paths,
                     std::vector<arrow::fs::FileInfo>* out)>
      get_file_info_vector;
  std::function<void(PyObject*, const arrow::fs::FileSelector&,
                     std::vector<arrow::fs::FileInfo>* out)>
      get_file_info_selector;

  std::function<void(PyObject*, const std::string& path, bool)> create_dir;
  std::function<void(PyObject*, const std::string& path)> delete_dir;
  std::function<void(PyObject*, const std::string& path, bool)> delete_dir_contents;
  std::function<void(PyObject*)> delete_root_dir_contents;
  std::function<void(PyObject*, const std::string& path)> delete_file;
  std::function<void(PyObject*, const std::string& src, const std::string& dest)> move;
  std::function<void(PyObject*, const std::string& src, const std::string& dest)>
      copy_file;

  std::function<void(PyObject*, const std::string& path,
                     std::shared_ptr<io::InputStream>* out)>
      open_input_stream;
  std::function<void(PyObject*, const std::string& path,
                     std::shared_ptr<io::RandomAccessFile>* out)>
      open_input_file;
  std::function<void(PyObject*, const std::string& path,
                     const std::shared_ptr<const KeyValueMetadata>&,
                     std::shared_ptr<io::OutputStream>* out)>
      open_output_stream;
  std::function<void(PyObject*, const std::string& path,
                     const std::shared_ptr<const KeyValueMetadata>&,
                     std::shared_ptr<io::OutputStream>* out)>
      open_append_stream;

  std::function<void(PyObject*, const std::string& path, std::string* out)>
      normalize_path;
};

class ARROW_PYTHON_EXPORT PyFileSystem : public arrow::fs::FileSystem {
 public:
  PyFileSystem(PyObject* handler, PyFileSystemVtable vtable);
  ~PyFileSystem() override;

  static std::shared_ptr<PyFileSystem> Make(PyObject* handler, PyFileSystemVtable vtable);

  std::string type_name() const override;

  bool Equals(const FileSystem& other) const override;

  Result<arrow::fs::FileInfo> GetFileInfo(const std::string& path) override;
  Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
      const std::vector<std::string>& paths) override;
  Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
      const arrow::fs::FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;
  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

  Result<std::string> NormalizePath(std::string path) override;

  PyObject* handler() const { return handler_.obj(); }

 private:
  OwnedRefNoGIL handler_;
  PyFileSystemVtable vtable_;
};

}  // namespace fs
}  // namespace py
}  // namespace arrow
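A sketch of populating the vtable and constructing the bridge filesystem; the lambdas here are placeholders for the real Cython-generated trampolines, and only the entries shown are filled in:

#include "arrow/python/filesystem.h"

std::shared_ptr<arrow::py::fs::PyFileSystem> MakeExampleFs(PyObject* handler) {
  arrow::py::fs::PyFileSystemVtable vtable;
  // Each entry receives the Python handler object as its first argument.
  vtable.get_type_name = [](PyObject*, std::string* out) { *out = "example"; };
  vtable.equals = [](PyObject*, const arrow::fs::FileSystem&) { return false; };
  return arrow::py::fs::PyFileSystem::Make(handler, std::move(vtable));
}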
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/flight.h
ADDED
@@ -0,0 +1,350 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/flight/api.h"
#include "arrow/ipc/dictionary.h"
#include "arrow/python/common.h"

#if defined(_WIN32) || defined(__CYGWIN__)  // Windows
#if defined(_MSC_VER)
#pragma warning(disable : 4251)
#else
#pragma GCC diagnostic ignored "-Wattributes"
#endif

#ifdef ARROW_PYTHON_STATIC
#define ARROW_PYFLIGHT_EXPORT
#elif defined(ARROW_PYFLIGHT_EXPORTING)
#define ARROW_PYFLIGHT_EXPORT __declspec(dllexport)
#else
#define ARROW_PYFLIGHT_EXPORT __declspec(dllimport)
#endif

#else  // Not Windows
#ifndef ARROW_PYFLIGHT_EXPORT
#define ARROW_PYFLIGHT_EXPORT __attribute__((visibility("default")))
#endif
#endif  // Non-Windows

namespace arrow {

namespace py {

namespace flight {

ARROW_PYFLIGHT_EXPORT
extern const char* kPyServerMiddlewareName;

/// \brief A table of function pointers for calling from C++ into
/// Python.
class ARROW_PYFLIGHT_EXPORT PyFlightServerVtable {
 public:
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       const arrow::flight::Criteria*,
                       std::unique_ptr<arrow::flight::FlightListing>*)>
      list_flights;
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       const arrow::flight::FlightDescriptor&,
                       std::unique_ptr<arrow::flight::FlightInfo>*)>
      get_flight_info;
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       const arrow::flight::FlightDescriptor&,
                       std::unique_ptr<arrow::flight::SchemaResult>*)>
      get_schema;
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       const arrow::flight::Ticket&,
                       std::unique_ptr<arrow::flight::FlightDataStream>*)>
      do_get;
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       std::unique_ptr<arrow::flight::FlightMessageReader>,
                       std::unique_ptr<arrow::flight::FlightMetadataWriter>)>
      do_put;
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       std::unique_ptr<arrow::flight::FlightMessageReader>,
                       std::unique_ptr<arrow::flight::FlightMessageWriter>)>
      do_exchange;
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       const arrow::flight::Action&,
                       std::unique_ptr<arrow::flight::ResultStream>*)>
      do_action;
  std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
                       std::vector<arrow::flight::ActionType>*)>
      list_actions;
};

class ARROW_PYFLIGHT_EXPORT PyServerAuthHandlerVtable {
 public:
  std::function<Status(PyObject*, arrow::flight::ServerAuthSender*,
                       arrow::flight::ServerAuthReader*)>
      authenticate;
  std::function<Status(PyObject*, const std::string&, std::string*)> is_valid;
};

class ARROW_PYFLIGHT_EXPORT PyClientAuthHandlerVtable {
 public:
  std::function<Status(PyObject*, arrow::flight::ClientAuthSender*,
                       arrow::flight::ClientAuthReader*)>
      authenticate;
  std::function<Status(PyObject*, std::string*)> get_token;
};

/// \brief A helper to implement an auth mechanism in Python.
class ARROW_PYFLIGHT_EXPORT PyServerAuthHandler
    : public arrow::flight::ServerAuthHandler {
 public:
  explicit PyServerAuthHandler(PyObject* handler,
                               const PyServerAuthHandlerVtable& vtable);
  Status Authenticate(arrow::flight::ServerAuthSender* outgoing,
                      arrow::flight::ServerAuthReader* incoming) override;
  Status IsValid(const std::string& token, std::string* peer_identity) override;

 private:
  OwnedRefNoGIL handler_;
  PyServerAuthHandlerVtable vtable_;
};

/// \brief A helper to implement an auth mechanism in Python.
class ARROW_PYFLIGHT_EXPORT PyClientAuthHandler
    : public arrow::flight::ClientAuthHandler {
 public:
  explicit PyClientAuthHandler(PyObject* handler,
                               const PyClientAuthHandlerVtable& vtable);
  Status Authenticate(arrow::flight::ClientAuthSender* outgoing,
                      arrow::flight::ClientAuthReader* incoming) override;
  Status GetToken(std::string* token) override;

 private:
  OwnedRefNoGIL handler_;
  PyClientAuthHandlerVtable vtable_;
};

class ARROW_PYFLIGHT_EXPORT PyFlightServer : public arrow::flight::FlightServerBase {
 public:
  explicit PyFlightServer(PyObject* server, const PyFlightServerVtable& vtable);

  // Like Serve(), but set up signals and invoke Python signal handlers
  // if necessary. This function may return with a Python exception set.
  Status ServeWithSignals();

  Status ListFlights(const arrow::flight::ServerCallContext& context,
                     const arrow::flight::Criteria* criteria,
                     std::unique_ptr<arrow::flight::FlightListing>* listings) override;
  Status GetFlightInfo(const arrow::flight::ServerCallContext& context,
                       const arrow::flight::FlightDescriptor& request,
                       std::unique_ptr<arrow::flight::FlightInfo>* info) override;
  Status GetSchema(const arrow::flight::ServerCallContext& context,
                   const arrow::flight::FlightDescriptor& request,
+
std::unique_ptr<arrow::flight::SchemaResult>* result) override;
|
158 |
+
Status DoGet(const arrow::flight::ServerCallContext& context,
|
159 |
+
const arrow::flight::Ticket& request,
|
160 |
+
std::unique_ptr<arrow::flight::FlightDataStream>* stream) override;
|
161 |
+
Status DoPut(const arrow::flight::ServerCallContext& context,
|
162 |
+
std::unique_ptr<arrow::flight::FlightMessageReader> reader,
|
163 |
+
std::unique_ptr<arrow::flight::FlightMetadataWriter> writer) override;
|
164 |
+
Status DoExchange(const arrow::flight::ServerCallContext& context,
|
165 |
+
std::unique_ptr<arrow::flight::FlightMessageReader> reader,
|
166 |
+
std::unique_ptr<arrow::flight::FlightMessageWriter> writer) override;
|
167 |
+
Status DoAction(const arrow::flight::ServerCallContext& context,
|
168 |
+
const arrow::flight::Action& action,
|
169 |
+
std::unique_ptr<arrow::flight::ResultStream>* result) override;
|
170 |
+
Status ListActions(const arrow::flight::ServerCallContext& context,
|
171 |
+
std::vector<arrow::flight::ActionType>* actions) override;
|
172 |
+
|
173 |
+
private:
|
174 |
+
OwnedRefNoGIL server_;
|
175 |
+
PyFlightServerVtable vtable_;
|
176 |
+
};
|
177 |
+
|
178 |
+
/// \brief A callback that obtains the next result from a Flight action.
|
179 |
+
typedef std::function<Status(PyObject*, std::unique_ptr<arrow::flight::Result>*)>
|
180 |
+
PyFlightResultStreamCallback;
|
181 |
+
|
182 |
+
/// \brief A ResultStream built around a Python callback.
|
183 |
+
class ARROW_PYFLIGHT_EXPORT PyFlightResultStream : public arrow::flight::ResultStream {
|
184 |
+
public:
|
185 |
+
/// \brief Construct a FlightResultStream from a Python object and callback.
|
186 |
+
/// Must only be called while holding the GIL.
|
187 |
+
explicit PyFlightResultStream(PyObject* generator,
|
188 |
+
PyFlightResultStreamCallback callback);
|
189 |
+
arrow::Result<std::unique_ptr<arrow::flight::Result>> Next() override;
|
190 |
+
|
191 |
+
private:
|
192 |
+
OwnedRefNoGIL generator_;
|
193 |
+
PyFlightResultStreamCallback callback_;
|
194 |
+
};
|
195 |
+
|
196 |
+
/// \brief A wrapper around a FlightDataStream that keeps alive a
|
197 |
+
/// Python object backing it.
|
198 |
+
class ARROW_PYFLIGHT_EXPORT PyFlightDataStream : public arrow::flight::FlightDataStream {
|
199 |
+
public:
|
200 |
+
/// \brief Construct a FlightDataStream from a Python object and underlying stream.
|
201 |
+
/// Must only be called while holding the GIL.
|
202 |
+
explicit PyFlightDataStream(PyObject* data_source,
|
203 |
+
std::unique_ptr<arrow::flight::FlightDataStream> stream);
|
204 |
+
|
205 |
+
std::shared_ptr<Schema> schema() override;
|
206 |
+
arrow::Result<arrow::flight::FlightPayload> GetSchemaPayload() override;
|
207 |
+
arrow::Result<arrow::flight::FlightPayload> Next() override;
|
208 |
+
|
209 |
+
private:
|
210 |
+
OwnedRefNoGIL data_source_;
|
211 |
+
std::unique_ptr<arrow::flight::FlightDataStream> stream_;
|
212 |
+
};
|
213 |
+
|
214 |
+
class ARROW_PYFLIGHT_EXPORT PyServerMiddlewareFactory
|
215 |
+
: public arrow::flight::ServerMiddlewareFactory {
|
216 |
+
public:
|
217 |
+
/// \brief A callback to create the middleware instance in Python
|
218 |
+
typedef std::function<Status(
|
219 |
+
PyObject*, const arrow::flight::CallInfo& info,
|
220 |
+
const arrow::flight::CallHeaders& incoming_headers,
|
221 |
+
std::shared_ptr<arrow::flight::ServerMiddleware>* middleware)>
|
222 |
+
StartCallCallback;
|
223 |
+
|
224 |
+
/// \brief Must only be called while holding the GIL.
|
225 |
+
explicit PyServerMiddlewareFactory(PyObject* factory, StartCallCallback start_call);
|
226 |
+
|
227 |
+
Status StartCall(const arrow::flight::CallInfo& info,
|
228 |
+
const arrow::flight::CallHeaders& incoming_headers,
|
229 |
+
std::shared_ptr<arrow::flight::ServerMiddleware>* middleware) override;
|
230 |
+
|
231 |
+
private:
|
232 |
+
OwnedRefNoGIL factory_;
|
233 |
+
StartCallCallback start_call_;
|
234 |
+
};
|
235 |
+
|
236 |
+
class ARROW_PYFLIGHT_EXPORT PyServerMiddleware : public arrow::flight::ServerMiddleware {
|
237 |
+
public:
|
238 |
+
typedef std::function<Status(PyObject*,
|
239 |
+
arrow::flight::AddCallHeaders* outgoing_headers)>
|
240 |
+
SendingHeadersCallback;
|
241 |
+
typedef std::function<Status(PyObject*, const Status& status)> CallCompletedCallback;
|
242 |
+
|
243 |
+
struct Vtable {
|
244 |
+
SendingHeadersCallback sending_headers;
|
245 |
+
CallCompletedCallback call_completed;
|
246 |
+
};
|
247 |
+
|
248 |
+
/// \brief Must only be called while holding the GIL.
|
249 |
+
explicit PyServerMiddleware(PyObject* middleware, Vtable vtable);
|
250 |
+
|
251 |
+
void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override;
|
252 |
+
void CallCompleted(const Status& status) override;
|
253 |
+
std::string name() const override;
|
254 |
+
/// \brief Get the underlying Python object.
|
255 |
+
PyObject* py_object() const;
|
256 |
+
|
257 |
+
private:
|
258 |
+
OwnedRefNoGIL middleware_;
|
259 |
+
Vtable vtable_;
|
260 |
+
};
|
261 |
+
|
262 |
+
class ARROW_PYFLIGHT_EXPORT PyClientMiddlewareFactory
|
263 |
+
: public arrow::flight::ClientMiddlewareFactory {
|
264 |
+
public:
|
265 |
+
/// \brief A callback to create the middleware instance in Python
|
266 |
+
typedef std::function<Status(
|
267 |
+
PyObject*, const arrow::flight::CallInfo& info,
|
268 |
+
std::unique_ptr<arrow::flight::ClientMiddleware>* middleware)>
|
269 |
+
StartCallCallback;
|
270 |
+
|
271 |
+
/// \brief Must only be called while holding the GIL.
|
272 |
+
explicit PyClientMiddlewareFactory(PyObject* factory, StartCallCallback start_call);
|
273 |
+
|
274 |
+
void StartCall(const arrow::flight::CallInfo& info,
|
275 |
+
std::unique_ptr<arrow::flight::ClientMiddleware>* middleware) override;
|
276 |
+
|
277 |
+
private:
|
278 |
+
OwnedRefNoGIL factory_;
|
279 |
+
StartCallCallback start_call_;
|
280 |
+
};
|
281 |
+
|
282 |
+
class ARROW_PYFLIGHT_EXPORT PyClientMiddleware : public arrow::flight::ClientMiddleware {
|
283 |
+
public:
|
284 |
+
typedef std::function<Status(PyObject*,
|
285 |
+
arrow::flight::AddCallHeaders* outgoing_headers)>
|
286 |
+
SendingHeadersCallback;
|
287 |
+
typedef std::function<Status(PyObject*,
|
288 |
+
const arrow::flight::CallHeaders& incoming_headers)>
|
289 |
+
ReceivedHeadersCallback;
|
290 |
+
typedef std::function<Status(PyObject*, const Status& status)> CallCompletedCallback;
|
291 |
+
|
292 |
+
struct Vtable {
|
293 |
+
SendingHeadersCallback sending_headers;
|
294 |
+
ReceivedHeadersCallback received_headers;
|
295 |
+
CallCompletedCallback call_completed;
|
296 |
+
};
|
297 |
+
|
298 |
+
/// \brief Must only be called while holding the GIL.
|
299 |
+
explicit PyClientMiddleware(PyObject* factory, Vtable vtable);
|
300 |
+
|
301 |
+
void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override;
|
302 |
+
void ReceivedHeaders(const arrow::flight::CallHeaders& incoming_headers) override;
|
303 |
+
void CallCompleted(const Status& status) override;
|
304 |
+
|
305 |
+
private:
|
306 |
+
OwnedRefNoGIL middleware_;
|
307 |
+
Vtable vtable_;
|
308 |
+
};
|
309 |
+
|
310 |
+
/// \brief A callback that obtains the next payload from a Flight result stream.
|
311 |
+
typedef std::function<Status(PyObject*, arrow::flight::FlightPayload*)>
|
312 |
+
PyGeneratorFlightDataStreamCallback;
|
313 |
+
|
314 |
+
/// \brief A FlightDataStream built around a Python callback.
|
315 |
+
class ARROW_PYFLIGHT_EXPORT PyGeneratorFlightDataStream
|
316 |
+
: public arrow::flight::FlightDataStream {
|
317 |
+
public:
|
318 |
+
/// \brief Construct a FlightDataStream from a Python object and underlying stream.
|
319 |
+
/// Must only be called while holding the GIL.
|
320 |
+
explicit PyGeneratorFlightDataStream(PyObject* generator,
|
321 |
+
std::shared_ptr<arrow::Schema> schema,
|
322 |
+
PyGeneratorFlightDataStreamCallback callback,
|
323 |
+
const ipc::IpcWriteOptions& options);
|
324 |
+
std::shared_ptr<Schema> schema() override;
|
325 |
+
arrow::Result<arrow::flight::FlightPayload> GetSchemaPayload() override;
|
326 |
+
arrow::Result<arrow::flight::FlightPayload> Next() override;
|
327 |
+
|
328 |
+
private:
|
329 |
+
OwnedRefNoGIL generator_;
|
330 |
+
std::shared_ptr<arrow::Schema> schema_;
|
331 |
+
ipc::DictionaryFieldMapper mapper_;
|
332 |
+
ipc::IpcWriteOptions options_;
|
333 |
+
PyGeneratorFlightDataStreamCallback callback_;
|
334 |
+
};
|
335 |
+
|
336 |
+
ARROW_PYFLIGHT_EXPORT
|
337 |
+
Status CreateFlightInfo(const std::shared_ptr<arrow::Schema>& schema,
|
338 |
+
const arrow::flight::FlightDescriptor& descriptor,
|
339 |
+
const std::vector<arrow::flight::FlightEndpoint>& endpoints,
|
340 |
+
int64_t total_records, int64_t total_bytes,
|
341 |
+
std::unique_ptr<arrow::flight::FlightInfo>* out);
|
342 |
+
|
343 |
+
/// \brief Create a SchemaResult from schema.
|
344 |
+
ARROW_PYFLIGHT_EXPORT
|
345 |
+
Status CreateSchemaResult(const std::shared_ptr<arrow::Schema>& schema,
|
346 |
+
std::unique_ptr<arrow::flight::SchemaResult>* out);
|
347 |
+
|
348 |
+
} // namespace flight
|
349 |
+
} // namespace py
|
350 |
+
} // namespace arrow
|
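Editor's note: a minimal sketch of how one slot of PyFlightServerVtable could be populated by hand. In pyarrow the callbacks are generated by Cython and dispatch into the Python server object while holding the GIL; the lambda, the helper name, and the "ping" action below are illustrative assumptions, not the real wiring.

#include "arrow/python/flight.h"

arrow::py::flight::PyFlightServerVtable MakeIllustrativeVtable() {
  arrow::py::flight::PyFlightServerVtable vtable;
  // The PyObject* argument is the Python server instance held by
  // PyFlightServer; a real callback would acquire the GIL and call into it.
  vtable.list_actions = [](PyObject* /*server*/,
                           const arrow::flight::ServerCallContext& /*context*/,
                           std::vector<arrow::flight::ActionType>* actions) {
    actions->push_back({"ping", "hypothetical no-op action"});
    return arrow::Status::OK();
  };
  return vtable;
}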
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h
ADDED
@@ -0,0 +1,29 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+namespace gdb {
+
+ARROW_PYTHON_EXPORT
+void TestSession();
+
+} // namespace gdb
+} // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/helpers.h
ADDED
@@ -0,0 +1,159 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "arrow/python/numpy_interop.h"
+
+#include <numpy/halffloat.h>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+
+namespace py {
+
+class OwnedRef;
+
+// \brief Get an arrow DataType instance from Arrow's Type::type enum
+// \param[in] type One of the values of Arrow's Type::type enum
+// \return A shared pointer to DataType
+ARROW_PYTHON_EXPORT std::shared_ptr<DataType> GetPrimitiveType(Type::type type);
+
+// \brief Construct a np.float16 object from a npy_half value.
+ARROW_PYTHON_EXPORT PyObject* PyHalf_FromHalf(npy_half value);
+
+// \brief Convert a Python object to a npy_half value.
+ARROW_PYTHON_EXPORT Status PyFloat_AsHalf(PyObject* obj, npy_half* out);
+
+namespace internal {
+
+// \brief Check that a Python module has already been imported
+// \param[in] module_name The name of the module
+Result<bool> IsModuleImported(const std::string& module_name);
+
+// \brief Import a Python module
+// \param[in] module_name The name of the module
+// \param[out] ref The OwnedRef containing the module PyObject*
+ARROW_PYTHON_EXPORT
+Status ImportModule(const std::string& module_name, OwnedRef* ref);
+
+// \brief Import an object from a Python module
+// \param[in] module A Python module
+// \param[in] name The name of the object to import
+// \param[out] ref The OwnedRef containing the \c name attribute of the Python module \c
+// module
+ARROW_PYTHON_EXPORT
+Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref);
+
+// \brief Check whether obj is an integer, independent of Python versions.
+inline bool IsPyInteger(PyObject* obj) { return PyLong_Check(obj); }
+
+// \brief Import symbols from pandas that we need for various type-checking,
+// like pandas.NaT or pandas.NA
+void InitPandasStaticData();
+
+// \brief Use pandas missing value semantics to check if a value is null
+ARROW_PYTHON_EXPORT
+bool PandasObjectIsNull(PyObject* obj);
+
+// \brief Check that obj is a pandas.Timedelta instance
+ARROW_PYTHON_EXPORT
+bool IsPandasTimedelta(PyObject* obj);
+
+// \brief Check that obj is a pandas.Timestamp instance
+bool IsPandasTimestamp(PyObject* obj);
+
+// \brief Return a borrowed reference to the pandas.tseries.offsets.DateOffset
+PyObject* BorrowPandasDataOffsetType();
+
+// \brief Check whether obj is a floating-point NaN
+ARROW_PYTHON_EXPORT
+bool PyFloat_IsNaN(PyObject* obj);
+
+inline bool IsPyBinary(PyObject* obj) {
+  return PyBytes_Check(obj) || PyByteArray_Check(obj) || PyMemoryView_Check(obj);
+}
+
+// \brief Convert a Python integer into a C integer
+// \param[in] obj A Python integer
+// \param[out] out A pointer to a C integer to hold the result of the conversion
+// \return The status of the operation
+template <typename Int>
+Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message = "");
+
+// \brief Convert a Python unicode string to a std::string
+ARROW_PYTHON_EXPORT
+Status PyUnicode_AsStdString(PyObject* obj, std::string* out);
+
+// \brief Convert a Python bytes object to a std::string
+ARROW_PYTHON_EXPORT
+std::string PyBytes_AsStdString(PyObject* obj);
+
+// \brief Call str() on the given object and return the result as a std::string
+ARROW_PYTHON_EXPORT
+Status PyObject_StdStringStr(PyObject* obj, std::string* out);
+
+// \brief Return the repr() of the given object (always succeeds)
+ARROW_PYTHON_EXPORT
+std::string PyObject_StdStringRepr(PyObject* obj);
+
+// \brief Cast the given size to int32_t, with error checking
+inline Status CastSize(Py_ssize_t size, int32_t* out,
+                       const char* error_msg = "Maximum size exceeded (2GB)") {
+  // size is assumed to be positive
+  if (size > std::numeric_limits<int32_t>::max()) {
+    return Status::Invalid(error_msg);
+  }
+  *out = static_cast<int32_t>(size);
+  return Status::OK();
+}
+
+inline Status CastSize(Py_ssize_t size, int64_t* out, const char* error_msg = NULLPTR) {
+  // size is assumed to be positive
+  *out = static_cast<int64_t>(size);
+  return Status::OK();
+}
+
+// \brief Print the Python object's __str__ form along with the passed error
+// message
+ARROW_PYTHON_EXPORT
+Status InvalidValue(PyObject* obj, const std::string& why);
+
+ARROW_PYTHON_EXPORT
+Status InvalidType(PyObject* obj, const std::string& why);
+
+ARROW_PYTHON_EXPORT
+Status IntegerScalarToDoubleSafe(PyObject* obj, double* result);
+ARROW_PYTHON_EXPORT
+Status IntegerScalarToFloat32Safe(PyObject* obj, float* result);
+
+// \brief Print Python object __repr__
+void DebugPrint(PyObject* obj);
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
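Editor's note: a minimal sketch of CastSize guarding the 2 GB limit that 32-bit Arrow offsets impose. CheckedBytesLength is a hypothetical helper; the caller is assumed to hold the GIL and to pass a genuine bytes object.

#include "arrow/python/helpers.h"

// Measure a Python bytes object before appending it to a builder whose
// offsets are int32_t (hence the 2 GB cap enforced by CastSize).
arrow::Status CheckedBytesLength(PyObject* bytes_obj, int32_t* length) {
  const Py_ssize_t size = PyBytes_GET_SIZE(bytes_obj);
  return arrow::py::internal::CastSize(size, length);
}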
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h
ADDED
@@ -0,0 +1,64 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between CPython built-in data structures and Arrow
+// data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+
+#include "common.h"
+
+namespace arrow {
+
+class Array;
+class Status;
+
+namespace py {
+
+// These functions take a sequence input, not arbitrary iterables
+
+/// \brief Infer Arrow type from a Python sequence
+/// \param[in] obj the sequence of values
+/// \param[in] mask an optional mask where True values are null. May
+/// be nullptr
+/// \param[in] pandas_null_sentinels use pandas's null value markers
+ARROW_PYTHON_EXPORT
+Result<std::shared_ptr<arrow::DataType>> InferArrowType(PyObject* obj, PyObject* mask,
+                                                        bool pandas_null_sentinels);
+
+/// Checks whether the passed Python object is a boolean scalar
+ARROW_PYTHON_EXPORT
+bool IsPyBool(PyObject* obj);
+
+/// Checks whether the passed Python object is an integer scalar
+ARROW_PYTHON_EXPORT
+bool IsPyInt(PyObject* obj);
+
+/// Checks whether the passed Python object is a float scalar
+ARROW_PYTHON_EXPORT
+bool IsPyFloat(PyObject* obj);
+
+} // namespace py
+} // namespace arrow
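Editor's note: a minimal sketch of calling InferArrowType on a Python list, assuming the caller already holds the GIL; the wrapper name is hypothetical.

#include "arrow/python/inference.h"

arrow::Result<std::shared_ptr<arrow::DataType>> InferFromList(PyObject* list) {
  // No explicit null mask; treat pandas sentinels (NaN, NaT, NA) as nulls.
  return arrow::py::InferArrowType(list, /*mask=*/nullptr,
                                   /*pandas_null_sentinels=*/true);
}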
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/io.h
ADDED
@@ -0,0 +1,121 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/io/transform.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+namespace py {
+
+class ARROW_NO_EXPORT PythonFile;
+
+class ARROW_PYTHON_EXPORT PyReadableFile : public io::RandomAccessFile {
+ public:
+  explicit PyReadableFile(PyObject* file);
+  ~PyReadableFile() override;
+
+  Status Close() override;
+  Status Abort() override;
+  bool closed() const override;
+
+  Result<int64_t> Read(int64_t nbytes, void* out) override;
+  Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
+
+  // Thread-safe version
+  Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out) override;
+
+  // Thread-safe version
+  Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes) override;
+
+  Result<int64_t> GetSize() override;
+
+  Status Seek(int64_t position) override;
+
+  Result<int64_t> Tell() const override;
+
+ private:
+  std::unique_ptr<PythonFile> file_;
+};
+
+class ARROW_PYTHON_EXPORT PyOutputStream : public io::OutputStream {
+ public:
+  explicit PyOutputStream(PyObject* file);
+  ~PyOutputStream() override;
+
+  Status Close() override;
+  Status Abort() override;
+  bool closed() const override;
+  Result<int64_t> Tell() const override;
+  Status Write(const void* data, int64_t nbytes) override;
+  Status Write(const std::shared_ptr<Buffer>& buffer) override;
+
+ private:
+  std::unique_ptr<PythonFile> file_;
+  int64_t position_;
+};
+
+// TODO(wesm): seekable output files
+
+// A Buffer subclass that keeps a PyObject reference throughout its
+// lifetime, such that the Python object is kept alive as long as the
+// C++ buffer is still needed.
+// Keeping the reference in a Python wrapper would be incorrect as
+// the Python wrapper can get destroyed even though the wrapped C++
+// buffer is still alive (ARROW-2270).
+class ARROW_PYTHON_EXPORT PyForeignBuffer : public Buffer {
+ public:
+  static Status Make(const uint8_t* data, int64_t size, PyObject* base,
+                     std::shared_ptr<Buffer>* out);
+
+ private:
+  PyForeignBuffer(const uint8_t* data, int64_t size, PyObject* base)
+      : Buffer(data, size) {
+    Py_INCREF(base);
+    base_.reset(base);
+  }
+
+  OwnedRefNoGIL base_;
+};
+
+// All this rigamarole because Cython is really poor with std::function<>
+
+using TransformCallback = std::function<void(
+    PyObject*, const std::shared_ptr<Buffer>& src, std::shared_ptr<Buffer>* out)>;
+
+struct TransformInputStreamVTable {
+  TransformCallback transform;
+};
+
+ARROW_PYTHON_EXPORT
+std::shared_ptr<::arrow::io::InputStream> MakeTransformInputStream(
+    std::shared_ptr<::arrow::io::InputStream> wrapped, TransformInputStreamVTable vtable,
+    PyObject* arg);
+
+using StreamWrapFunc = std::function<Result<std::shared_ptr<io::InputStream>>(
+    std::shared_ptr<io::InputStream>)>;
+ARROW_PYTHON_EXPORT
+std::shared_ptr<StreamWrapFunc> MakeStreamTransformFunc(TransformInputStreamVTable vtable,
+                                                        PyObject* handler);
+} // namespace py
+} // namespace arrow
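Editor's note: a minimal sketch of PyForeignBuffer::Make wrapping the storage of a Python bytes object zero-copy. WrapBytes is a hypothetical helper; the caller is assumed to hold the GIL and to pass a genuine bytes object.

#include "arrow/python/io.h"

arrow::Status WrapBytes(PyObject* bytes_obj, std::shared_ptr<arrow::Buffer>* out) {
  const auto* data =
      reinterpret_cast<const uint8_t*>(PyBytes_AS_STRING(bytes_obj));
  // The buffer holds a strong reference to bytes_obj, so the memory stays
  // valid for the buffer's whole lifetime (the ARROW-2270 scenario above).
  return arrow::py::PyForeignBuffer::Make(data, PyBytes_GET_SIZE(bytes_obj),
                                          bytes_obj, out);
}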
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/ipc.h
ADDED
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace py {
+
+class ARROW_PYTHON_EXPORT PyRecordBatchReader : public RecordBatchReader {
+ public:
+  std::shared_ptr<Schema> schema() const override;
+
+  Status ReadNext(std::shared_ptr<RecordBatch>* batch) override;
+
+  // For use from Cython
+  // Assumes that `iterable` is borrowed
+  static Result<std::shared_ptr<RecordBatchReader>> Make(std::shared_ptr<Schema>,
+                                                         PyObject* iterable);
+
+ protected:
+  PyRecordBatchReader();
+
+  Status Init(std::shared_ptr<Schema>, PyObject* iterable);
+
+  std::shared_ptr<Schema> schema_;
+  OwnedRefNoGIL iterator_;
+};
+
+} // namespace py
+} // namespace arrow
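Editor's note: a minimal sketch of PyRecordBatchReader::Make. Per the comment above, `iterable` is borrowed, so the (hypothetical) caller must keep the Python object alive for the reader's lifetime.

#include <utility>
#include "arrow/python/ipc.h"

arrow::Result<std::shared_ptr<arrow::RecordBatchReader>> ReaderFromIterable(
    std::shared_ptr<arrow::Schema> schema, PyObject* iterable) {
  // The iterable is expected to yield wrapped pyarrow RecordBatches.
  return arrow::py::PyRecordBatchReader::Make(std::move(schema), iterable);
}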
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/iterators.h
ADDED
@@ -0,0 +1,194 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <utility>
+
+#include "arrow/array/array_primitive.h"
+
+#include "arrow/python/common.h"
+#include "arrow/python/numpy_internal.h"
+
+namespace arrow {
+namespace py {
+namespace internal {
+
+using arrow::internal::checked_cast;
+
+// Visit the Python sequence, calling the given callable on each element. If
+// the callable returns a non-OK status, iteration stops and the status is
+// returned.
+//
+// The call signature for Visitor must be
+//
+// Visit(PyObject* obj, int64_t index, bool* keep_going)
+//
+// If keep_going is set to false, the iteration terminates
+template <class VisitorFunc>
+inline Status VisitSequenceGeneric(PyObject* obj, int64_t offset, VisitorFunc&& func) {
+  // VisitorFunc may set keep_going to false to terminate iteration
+  bool keep_going = true;
+
+  if (PyArray_Check(obj)) {
+    PyArrayObject* arr_obj = reinterpret_cast<PyArrayObject*>(obj);
+    if (PyArray_NDIM(arr_obj) != 1) {
+      return Status::Invalid("Only 1D arrays accepted");
+    }
+
+    if (PyArray_DESCR(arr_obj)->type_num == NPY_OBJECT) {
+      // It's an array object, we can fetch object pointers directly
+      const Ndarray1DIndexer<PyObject*> objects(arr_obj);
+      for (int64_t i = offset; keep_going && i < objects.size(); ++i) {
+        RETURN_NOT_OK(func(objects[i], i, &keep_going));
+      }
+      return Status::OK();
+    }
+    // It's a non-object array, fall back on regular sequence access.
+    // (note PyArray_GETITEM() is slightly different: it returns standard
+    // Python types, not Numpy scalar types)
+    // This code path is inefficient: callers should implement dedicated
+    // logic for non-object arrays.
+  }
+  if (PySequence_Check(obj)) {
+    if (PyList_Check(obj) || PyTuple_Check(obj)) {
+      // Use fast item access
+      const Py_ssize_t size = PySequence_Fast_GET_SIZE(obj);
+      for (Py_ssize_t i = offset; keep_going && i < size; ++i) {
+        PyObject* value = PySequence_Fast_GET_ITEM(obj, i);
+        RETURN_NOT_OK(func(value, static_cast<int64_t>(i), &keep_going));
+      }
+    } else {
+      // Regular sequence: avoid making a potentially large copy
+      const Py_ssize_t size = PySequence_Size(obj);
+      RETURN_IF_PYERROR();
+      for (Py_ssize_t i = offset; keep_going && i < size; ++i) {
+        OwnedRef value_ref(PySequence_ITEM(obj, i));
+        RETURN_IF_PYERROR();
+        RETURN_NOT_OK(func(value_ref.obj(), static_cast<int64_t>(i), &keep_going));
+      }
+    }
+  } else {
+    return Status::TypeError("Object is not a sequence");
+  }
+  return Status::OK();
+}
+
+// Visit sequence with no null mask
+template <class VisitorFunc>
+inline Status VisitSequence(PyObject* obj, int64_t offset, VisitorFunc&& func) {
+  return VisitSequenceGeneric(
+      obj, offset, [&func](PyObject* value, int64_t i /* unused */, bool* keep_going) {
+        return func(value, keep_going);
+      });
+}
+
+/// Visit sequence with null mask
+template <class VisitorFunc>
+inline Status VisitSequenceMasked(PyObject* obj, PyObject* mo, int64_t offset,
+                                  VisitorFunc&& func) {
+  if (PyArray_Check(mo)) {
+    PyArrayObject* mask = reinterpret_cast<PyArrayObject*>(mo);
+    if (PyArray_NDIM(mask) != 1) {
+      return Status::Invalid("Mask must be 1D array");
+    }
+    if (PyArray_SIZE(mask) != static_cast<int64_t>(PySequence_Size(obj))) {
+      return Status::Invalid("Mask was a different length from sequence being converted");
+    }
+
+    const int dtype = fix_numpy_type_num(PyArray_DESCR(mask)->type_num);
+    if (dtype == NPY_BOOL) {
+      Ndarray1DIndexer<uint8_t> mask_values(mask);
+
+      return VisitSequenceGeneric(
+          obj, offset,
+          [&func, &mask_values](PyObject* value, int64_t i, bool* keep_going) {
+            return func(value, mask_values[i], keep_going);
+          });
+    } else {
+      return Status::TypeError("Mask must be boolean dtype");
+    }
+  } else if (py::is_array(mo)) {
+    auto unwrap_mask_result = unwrap_array(mo);
+    ARROW_RETURN_NOT_OK(unwrap_mask_result);
+    std::shared_ptr<Array> mask_ = unwrap_mask_result.ValueOrDie();
+    if (mask_->type_id() != Type::type::BOOL) {
+      return Status::TypeError("Mask must be an array of booleans");
+    }
+
+    if (mask_->length() != PySequence_Size(obj)) {
+      return Status::Invalid("Mask was a different length from sequence being converted");
+    }
+
+    if (mask_->null_count() != 0) {
+      return Status::TypeError("Mask must be an array of booleans");
+    }
+
+    BooleanArray* boolmask = checked_cast<BooleanArray*>(mask_.get());
+    return VisitSequenceGeneric(
+        obj, offset, [&func, &boolmask](PyObject* value, int64_t i, bool* keep_going) {
+          return func(value, boolmask->Value(i), keep_going);
+        });
+  } else if (PySequence_Check(mo)) {
+    if (PySequence_Size(mo) != PySequence_Size(obj)) {
+      return Status::Invalid("Mask was a different length from sequence being converted");
+    }
+    RETURN_IF_PYERROR();
+
+    return VisitSequenceGeneric(
+        obj, offset, [&func, &mo](PyObject* value, int64_t i, bool* keep_going) {
+          OwnedRef value_ref(PySequence_ITEM(mo, i));
+          if (!PyBool_Check(value_ref.obj()))
+            return Status::TypeError("Mask must be a sequence of booleans");
+          return func(value, value_ref.obj() == Py_True, keep_going);
+        });
+  } else {
+    return Status::Invalid("Null mask must be a NumPy array, Arrow array or a Sequence");
+  }
+
+  return Status::OK();
+}
+
+// Like VisitSequence, but accepts any generic iterable (including
+// non-restartable iterators, e.g. generators).
+//
+// The call signature for VisitorFunc must be Visit(PyObject*, bool*
+// keep_going). If keep_going is set to false, the iteration terminates
+template <class VisitorFunc>
+inline Status VisitIterable(PyObject* obj, VisitorFunc&& func) {
+  if (PySequence_Check(obj)) {
+    // Numpy arrays fall here as well
+    return VisitSequence(obj, /*offset=*/0, std::forward<VisitorFunc>(func));
+  }
+  // Fall back on the iterator protocol
+  OwnedRef iter_ref(PyObject_GetIter(obj));
+  PyObject* iter = iter_ref.obj();
+  RETURN_IF_PYERROR();
+  PyObject* value;
+
+  bool keep_going = true;
+  while (keep_going && (value = PyIter_Next(iter))) {
+    OwnedRef value_ref(value);
+    RETURN_NOT_OK(func(value_ref.obj(), &keep_going));
+  }
+  RETURN_IF_PYERROR();  // __next__() might have raised
+  return Status::OK();
+}
+
+} // namespace internal
+} // namespace py
+} // namespace arrow
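Editor's note: a minimal sketch of the visitor protocol described above, using VisitSequence to count None elements. CountNones is a hypothetical helper and the GIL is assumed to be held.

#include "arrow/python/iterators.h"

arrow::Status CountNones(PyObject* seq, int64_t* n_none) {
  *n_none = 0;
  return arrow::py::internal::VisitSequence(
      seq, /*offset=*/0, [&](PyObject* value, bool* /*keep_going*/) {
        if (value == Py_None) ++(*n_none);
        return arrow::Status::OK();  // a non-OK Status would stop iteration
      });
}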
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h
ADDED
@@ -0,0 +1,83 @@
+/* Generated by Cython 3.0.9 */
+
+#ifndef __PYX_HAVE__pyarrow__lib
+#define __PYX_HAVE__pyarrow__lib
+
+#include "Python.h"
+
+#ifndef __PYX_HAVE_API__pyarrow__lib
+
+#ifdef CYTHON_EXTERN_C
+#undef __PYX_EXTERN_C
+#define __PYX_EXTERN_C CYTHON_EXTERN_C
+#elif defined(__PYX_EXTERN_C)
+#ifdef _MSC_VER
+#pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
+#else
+#warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
+#endif
+#else
+#define __PYX_EXTERN_C extern "C++"
+#endif
+
+#ifndef DL_IMPORT
+#define DL_IMPORT(_T) _T
+#endif
+
+__PYX_EXTERN_C PyObject *pyarrow_wrap_buffer(std::shared_ptr< arrow::Buffer> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_resizable_buffer(std::shared_ptr< arrow::ResizableBuffer> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_data_type(std::shared_ptr< arrow::DataType> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_field(std::shared_ptr< arrow::Field> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_schema(std::shared_ptr< arrow::Schema> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_scalar(std::shared_ptr< arrow::Scalar> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_array(std::shared_ptr< arrow::Array> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_chunked_array(std::shared_ptr< arrow::ChunkedArray> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_coo_tensor(std::shared_ptr< arrow::SparseCOOTensor> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csc_matrix(std::shared_ptr< arrow::SparseCSCMatrix> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csf_tensor(std::shared_ptr< arrow::SparseCSFTensor> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csr_matrix(std::shared_ptr< arrow::SparseCSRMatrix> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_tensor(std::shared_ptr< arrow::Tensor> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_batch(std::shared_ptr< arrow::RecordBatch> const &);
+__PYX_EXTERN_C PyObject *pyarrow_wrap_table(std::shared_ptr< arrow::Table> const &);
+__PYX_EXTERN_C std::shared_ptr< arrow::Buffer> pyarrow_unwrap_buffer(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::DataType> pyarrow_unwrap_data_type(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Field> pyarrow_unwrap_field(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Schema> pyarrow_unwrap_schema(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Scalar> pyarrow_unwrap_scalar(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Array> pyarrow_unwrap_array(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::ChunkedArray> pyarrow_unwrap_chunked_array(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCOOTensor> pyarrow_unwrap_sparse_coo_tensor(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSCMatrix> pyarrow_unwrap_sparse_csc_matrix(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSFTensor> pyarrow_unwrap_sparse_csf_tensor(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSRMatrix> pyarrow_unwrap_sparse_csr_matrix(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Tensor> pyarrow_unwrap_tensor(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::RecordBatch> pyarrow_unwrap_batch(PyObject *);
+__PYX_EXTERN_C std::shared_ptr< arrow::Table> pyarrow_unwrap_table(PyObject *);
+
+#endif /* !__PYX_HAVE_API__pyarrow__lib */
+
+/* WARNING: the interface of the module init function changed in CPython 3.5. */
+/* It now returns a PyModuleDef instance instead of a PyModule instance. */
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC initlib(void);
+#else
+/* WARNING: Use PyImport_AppendInittab("lib", PyInit_lib) instead of calling PyInit_lib directly from Python 3.5 */
+PyMODINIT_FUNC PyInit_lib(void);
+
+#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L))
+#if defined(__cplusplus) && __cplusplus >= 201402L
+[[deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")]] inline
+#elif defined(__GNUC__) || defined(__clang__)
+__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly."), __unused__)) __inline__
+#elif defined(_MSC_VER)
+__declspec(deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")) __inline
+#endif
+static PyObject* __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyObject* res) {
+  return res;
+}
+#define PyInit_lib() __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyInit_lib())
+#endif
+#endif
+
+#endif /* !__PYX_HAVE__pyarrow__lib */
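Editor's note: the exported wrap/unwrap functions above cross the C++/Python boundary zero-copy. A minimal sketch, assuming pyarrow.lib has already been imported into a running interpreter and that the caller can resolve its symbols; in practice, extensions use the function-pointer table in lib_api.h below rather than linking directly.

#include "arrow/python/lib.h"

PyObject* ArrayToPython(const std::shared_ptr<arrow::Array>& array) {
  // Returns a new reference to a pyarrow.Array sharing the same buffers.
  return pyarrow_wrap_array(array);
}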
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib_api.h
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/* Generated by Cython 3.0.9 */
|
2 |
+
|
3 |
+
#ifndef __PYX_HAVE_API__pyarrow__lib
|
4 |
+
#define __PYX_HAVE_API__pyarrow__lib
|
5 |
+
#ifdef __MINGW64__
|
6 |
+
#define MS_WIN64
|
7 |
+
#endif
|
8 |
+
#include "Python.h"
|
9 |
+
#include "lib.h"
|
10 |
+
|
11 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_box_memory_pool)( arrow::MemoryPool *) = 0;
|
12 |
+
#define box_memory_pool __pyx_api_f_7pyarrow_3lib_box_memory_pool
|
13 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0;
|
14 |
+
#define pyarrow_wrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer
|
15 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< arrow::ResizableBuffer> const &) = 0;
|
16 |
+
#define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer
|
17 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0;
|
18 |
+
#define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type
|
19 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0;
|
20 |
+
#define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field
|
21 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0;
|
22 |
+
#define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema
|
23 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0;
|
24 |
+
#define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar
|
25 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0;
|
26 |
+
#define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array
|
27 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0;
|
28 |
+
#define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array
|
29 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0;
|
30 |
+
#define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor
|
31 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0;
|
32 |
+
#define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix
|
33 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< arrow::SparseCSFTensor> const &) = 0;
|
34 |
+
#define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor
|
35 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0;
|
36 |
+
#define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix
|
37 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0;
|
38 |
+
#define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor
|
39 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0;
|
40 |
+
#define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch
|
41 |
+
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0;
|
42 |
+
#define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table
|
43 |
+
static std::shared_ptr< arrow::Buffer> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0;
|
44 |
+
#define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer
|
45 |
+
static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0;
|
46 |
+
#define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type
|
47 |
+
static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0;
|
48 |
+
#define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field
|
49 |
+
static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0;
|
50 |
+
#define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema
|
51 |
+
static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0;
|
52 |
+
#define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar
|
53 |
+
static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0;
|
54 |
+
#define pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array
|
55 |
+
static std::shared_ptr< arrow::ChunkedArray> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0;
|
56 |
+
#define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array
|
57 |
+
static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0;
#define pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor
static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0;
#define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix
static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0;
#define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor
static std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0;
#define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix
static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0;
#define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor
static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0;
#define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch
static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0;
#define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0;
#define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status
static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status)(arrow::Status const &) = 0;
#define pyarrow_internal_convert_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0;
#define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0;
#define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0;
#define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0;
#define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0;
#define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 0;
#define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0;
#define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0;
#define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0;
#define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0;
#define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0;
#define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0;
#define pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0;
#define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0;
#define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table
static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0;
#define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch
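/*
 * The function pointers and #define aliases above are the consumer-facing
 * surface of pyarrow's Cython C API. A minimal sketch of how a C++ extension
 * might use them once import_pyarrow__lib() (defined below) has succeeded:
 * to_arrow_table is a hypothetical helper name, while pyarrow_is_table and
 * pyarrow_unwrap_table are the aliases declared above.
 */
static std::shared_ptr< arrow::Table> to_arrow_table(PyObject *obj) {
    if (!pyarrow_is_table(obj)) {
        /* Reject anything that is not a pyarrow.Table instance. */
        PyErr_SetString(PyExc_TypeError, "expected a pyarrow.Table");
        return nullptr;
    }
    /* Yields an empty shared_ptr if the unwrap fails internally. */
    return pyarrow_unwrap_table(obj);
}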
#ifndef __PYX_HAVE_RT_ImportFunction_3_0_9
#define __PYX_HAVE_RT_ImportFunction_3_0_9
static int __Pyx_ImportFunction_3_0_9(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
    PyObject *d = 0;
    PyObject *cobj = 0;
    union {
        void (*fp)(void);
        void *p;
    } tmp;
    d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
    if (!d)
        goto bad;
    cobj = PyDict_GetItemString(d, funcname);
    if (!cobj) {
        PyErr_Format(PyExc_ImportError,
            "%.200s does not export expected C function %.200s",
            PyModule_GetName(module), funcname);
        goto bad;
    }
    if (!PyCapsule_IsValid(cobj, sig)) {
        PyErr_Format(PyExc_TypeError,
            "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
            PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
        goto bad;
    }
    tmp.p = PyCapsule_GetPointer(cobj, sig);
    *f = tmp.fp;
    if (!(*f))
        goto bad;
    Py_DECREF(d);
    return 0;
bad:
    Py_XDECREF(d);
    return -1;
}
#endif

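/*
 * __Pyx_ImportFunction_3_0_9 above is the importing half of a capsule-based
 * C-API exchange: each exported function pointer travels inside a PyCapsule
 * whose *name* is the C signature string, so PyCapsule_IsValid doubles as a
 * type check. A hedged sketch of the matching exporting half, assuming the
 * same convention; export_demo_function and demo_func are hypothetical names,
 * not part of pyarrow, and <Python.h> is assumed to be in scope as it is for
 * the rest of this header.
 */
static void demo_func(void) {}

static int export_demo_function(PyObject *module) {
    /* Same function-pointer/void-pointer aliasing trick as the importer. */
    union { void (*fp)(void); void *p; } tmp;
    PyObject *capi = NULL;
    PyObject *capsule = NULL;
    tmp.fp = demo_func;
    /* Capsule name carries the signature the importer will verify. */
    capsule = PyCapsule_New(tmp.p, "void (void)", NULL);
    capi = PyDict_New();
    if (!capsule || !capi ||
        PyDict_SetItemString(capi, "demo_func", capsule) < 0 ||
        PyObject_SetAttrString(module, "__pyx_capi__", capi) < 0)
        goto bad;
    Py_DECREF(capsule);
    Py_DECREF(capi);
    return 0;
bad:
    Py_XDECREF(capsule);
    Py_XDECREF(capi);
    return -1;
}
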
static int import_pyarrow__lib(void) {
  PyObject *module = 0;
  module = PyImport_ImportModule("pyarrow.lib");
  if (!module) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "box_memory_pool", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_box_memory_pool, "PyObject *( arrow::MemoryPool *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject *(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, "std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_internal_check_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_internal_convert_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status, "PyObject *(arrow::Status const &)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto bad;
  if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad;
  Py_DECREF(module); module = 0;
  return 0;
  bad:
  Py_XDECREF(module);
  return -1;
}

#endif /* !__PYX_HAVE_API__pyarrow__lib */
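
/*
 * A hedged usage sketch: a consumer extension calls import_pyarrow__lib()
 * once, typically from its module-init path, before touching any pyarrow_*
 * alias; on failure a Python exception (ImportError or TypeError) has
 * already been set by the helpers above. init_my_extension is a hypothetical
 * name. pyarrow's public C++ headers are believed to expose the same step
 * via arrow::py::import_pyarrow(), but that wrapper is not part of this file.
 */
static int init_my_extension(void) {
    if (import_pyarrow__lib() != 0)
        return -1;  /* propagate: the exception is already set */
    return 0;       /* pyarrow_wrap_* / pyarrow_unwrap_* are now usable */
}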